diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 00000000000..901aa63f927 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,41 @@ +default: + image: registry.sxxfuture.net/lib/docker/lotus-builder/debian:230224-002040 + +stages: + - build + +.build-snippet: + tags: [docker, x86, merak] + stage: build + variables: + GOPATH: $CI_PROJECT_DIR/.go + HTTP_PROXY: $DEFAULT_PROXY + HTTPS_PROXY: $DEFAULT_PROXY + http_proxy: $DEFAULT_PROXY + https_proxy: $DEFAULT_PROXY + RUSTFLAGS: "" + GOFLAGS: "" + cache: + - key: + files: + - go.mod + - go.sum + paths: + - .go/ + - key: build-deps-$CI_COMMIT_REF_SLUG + paths: + - build/.update-modules + - build/.filecoin-install + - extern/serialization-vectors/ + - extern/test-vectors/ + - extern/filecoin-ffi/ + before_script: + # ci cache will create empty dir, which should not exist + - '[ -z "$(ls extern/serialization-vectors)" ] && rm -r extern/serialization-vectors' + - '[ -z "$(ls extern/test-vectors)" ] && rm -r extern/test-vectors' + - '[ -z "$(ls extern/filecoin-ffi)" ] && rm -r extern/filecoin-ffi' + - mkdir -p .go + +include: + - '.gitlab-ci/v18.yml' + - '.gitlab-ci/v18-filbase.yml' diff --git a/.gitlab-ci/Dockerfile b/.gitlab-ci/Dockerfile new file mode 100644 index 00000000000..e9c2657c91a --- /dev/null +++ b/.gitlab-ci/Dockerfile @@ -0,0 +1,17 @@ +FROM registry.sxxfuture.net/lib/docker/lotus-builder/debian:230224-002040 as builder +FROM ubuntu:20.04 + +COPY --from=builder /etc/ssl/certs /etc/ssl/certs +COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/ +COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.15 /lib/ +COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/ + 
+RUN mkdir -p /etc/OpenCL/vendors \ + && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd + +COPY lotus lotus-* /usr/local/bin/ diff --git a/.gitlab-ci/Dockerfile.dockerignore b/.gitlab-ci/Dockerfile.dockerignore new file mode 100644 index 00000000000..6ef1c1c3b5d --- /dev/null +++ b/.gitlab-ci/Dockerfile.dockerignore @@ -0,0 +1,4 @@ +* + +!lotus +!lotus-* diff --git a/.gitlab-ci/v18-filbase.yml b/.gitlab-ci/v18-filbase.yml new file mode 100644 index 00000000000..15040ffc923 --- /dev/null +++ b/.gitlab-ci/v18-filbase.yml @@ -0,0 +1,50 @@ +"v18/filbase: build lotus": + extends: .build-snippet + artifacts: + name: "v18/filbase: binaries" + paths: [lotus, lotus-miner, lotus-worker] + script: + - make lotus lotus-miner lotus-worker + only: + refs: + - v18_of_car_filbase + +"v18/filbase: build external binaries": + extends: .build-snippet + artifacts: + name: "v18/filbase: external binaries" + paths: [lotus-shed, lotus-wallet, lotus-gateway, lotus-stats] + script: + - make lotus-shed lotus-wallet lotus-gateway lotus-stats + only: + refs: + - v18_of_car_filbase + when: manual + +"v18/filbase: build lotus (2k)": + extends: .build-snippet + variables: + GOFLAGS: "-tags=2k" + artifacts: + name: "v18-2k binaries" + paths: [lotus-shed, lotus, lotus-miner, lotus-worker, lotus-seed] + script: + - make lotus-shed lotus lotus-miner lotus-worker lotus-seed + only: + refs: + - v18_of_car_filbase + when: manual + +"v18/filbase: build external binaries (2k)": + extends: .build-snippet + variables: + GOFLAGS: "-tags=2k" + artifacts: + name: "v18-2k external binaries" + paths: [lotus-wallet, lotus-gateway, lotus-stats] + script: + - make lotus-wallet lotus-gateway lotus-stats + only: + refs: + - v18_of_car_filbase + when: manual diff --git a/.gitlab-ci/v18.yml b/.gitlab-ci/v18.yml new file mode 100644 index 00000000000..aa679e84d84 --- /dev/null +++ b/.gitlab-ci/v18.yml @@ -0,0 +1,66 @@ +"v18: build lotus": + extends: .build-snippet + artifacts: + name: "v18 binaries" + 
paths: [lotus, lotus-miner, lotus-worker] + script: + - make lotus lotus-miner lotus-worker + only: + refs: + - v18 + +"v18: build external binaries": + extends: .build-snippet + artifacts: + name: "v18 external binaries" + paths: [lotus-shed, lotus-wallet, lotus-gateway, lotus-stats] + script: + - make lotus-shed lotus-wallet lotus-gateway lotus-stats + only: + refs: + - v18 + when: manual + +"v18: build lotus (2k)": + extends: .build-snippet + variables: + GOFLAGS: "-tags=2k" + artifacts: + name: "v18-2k binaries" + paths: [lotus-shed, lotus, lotus-miner, lotus-worker, lotus-seed] + script: + - make lotus-shed lotus lotus-miner lotus-worker lotus-seed + only: + refs: + - v18 + when: manual + +"v18: build docker (2k)": + image: geektr/yumemi:builder-20221213 + tags: [docker, x86, merak] + stage: build + needs: ["v18: build lotus (2k)"] + dependencies: ["v18: build lotus (2k)"] + variables: + # DOCKER_BUILDKIT: 1 + docker_file: .gitlab-ci/Dockerfile + docker_tag: "@repo/v18:2k-aio-@datetime" + script: + - yumemi gitlab-ci docker build + only: + refs: + - v18 + +"v18: build external binaries (2k)": + extends: .build-snippet + variables: + GOFLAGS: "-tags=2k" + artifacts: + name: "v18-2k external binaries" + paths: [lotus-wallet, lotus-gateway, lotus-stats] + script: + - make lotus-wallet lotus-gateway lotus-stats + only: + refs: + - v18 + when: manual diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000000..304c3d7f0aa --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "go.inferGopath": false +} \ No newline at end of file diff --git a/api/api_full.go b/api/api_full.go index 320a206873f..cac0517018e 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -32,6 +32,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" ) 
//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode @@ -338,6 +339,7 @@ type FullNode interface { ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write + ClientStatelessDealSxx(ctx context.Context, params *StartDealParams) (*network.Proposal, error) //perm:write // ClientGetDealInfo returns the latest information about a given deal. ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read // ClientListDeals returns information about the deals made by the local client. @@ -1039,6 +1041,9 @@ type StartDealParams struct { DealStartEpoch abi.ChainEpoch FastRetrieval bool VerifiedDeal bool + // add by pan + Peerid *peer.ID + // } func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) { diff --git a/api/api_storage.go b/api/api_storage.go index 100be5cca76..6f8f80dc541 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -104,6 +104,7 @@ type StorageMiner interface { // SectorGetExpectedSealDuration gets the expected time for a sector to seal SectorGetExpectedSealDuration(context.Context) (time.Duration, error) //perm:read SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin + SectorsUpdateOfSxx(context.Context, abi.SectorNumber, SectorState, string) error //perm:admin // SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can // be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. 
SectorRemove(context.Context, abi.SectorNumber) error //perm:admin @@ -293,6 +294,7 @@ type StorageMiner interface { RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin + DealsImportDataOfSxx(ctx context.Context, dealPropCid cid.Cid, file string, worker string) error //perm:admin DealsList(ctx context.Context) ([]*MarketDeal, error) //perm:admin DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin @@ -322,6 +324,8 @@ type StorageMiner interface { CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin + CheckProve(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin + ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read // RecoverFault can be used to declare recoveries manually. It sends messages @@ -458,6 +462,8 @@ type PieceDealInfo struct { DealProposal *market.DealProposal DealSchedule DealSchedule KeepUnsealed bool + RemoteFilepath string + Worker string } // DealSchedule communicates the time interval of a storage deal. 
The deal must diff --git a/api/api_worker.go b/api/api_worker.go index cca929d39c9..773fdfdf2e8 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -35,6 +35,9 @@ type Worker interface { // storiface.WorkerCalls DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (storiface.CallID, error) //perm:admin AddPiece(ctx context.Context, sector storiface.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (storiface.CallID, error) //perm:admin + // add by lin + AddPieceOfSxx(ctx context.Context, sector storiface.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, path string) (storiface.CallID, error) //perm:admin + // end SealPreCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin SealPreCommit2(ctx context.Context, sector storiface.SectorRef, pc1o storiface.PreCommit1Out) (storiface.CallID, error) //perm:admin SealCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storiface.SectorCids) (storiface.CallID, error) //perm:admin diff --git a/api/proxy_gen.go b/api/proxy_gen.go index b466d6336b9..4ef5aa158b6 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -43,6 +43,7 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/sealtasks" "github.com/filecoin-project/lotus/storage/sealer/storiface" + marketnetwork "github.com/filecoin-project/go-fil-markets/storagemarket/network" ) var ErrNotSupported = xerrors.New("method not supported") @@ -214,6 +215,8 @@ type FullNodeStruct struct { ClientStatelessDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"write"` + ClientStatelessDealSxx func(p0 context.Context, p1 *StartDealParams) (*marketnetwork.Proposal, error) 
`perm:"write"` + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` @@ -679,6 +682,8 @@ type StorageMinerStruct struct { CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` + CheckProve func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` + ComputeDataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (abi.PieceInfo, error) `perm:"admin"` ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"` @@ -715,6 +720,8 @@ type StorageMinerStruct struct { DealsImportData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"admin"` + DealsImportDataOfSxx func(p0 context.Context, p1 cid.Cid, p2 string, p3 string) error `perm:"admin"` + DealsList func(p0 context.Context) ([]*MarketDeal, error) `perm:"admin"` DealsPieceCidBlocklist func(p0 context.Context) ([]cid.Cid, error) `perm:"admin"` @@ -889,6 +896,8 @@ type StorageMinerStruct struct { SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"` + SectorsUpdateOfSxx func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState, p3 string) error `perm:"admin"` + StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"` StorageAttach func(p0 context.Context, p1 storiface.StorageInfo, p2 fsutil.FsStat) error `perm:"admin"` @@ -964,6 +973,10 @@ type WorkerStruct struct { Internal struct { AddPiece func(p0 context.Context, p1 storiface.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storiface.Data) (storiface.CallID, error) `perm:"admin"` + // add by lin + 
AddPieceOfSxx func(p0 context.Context, p1 storiface.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 string) (storiface.CallID, error) `perm:"admin"` + // end + DataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (storiface.CallID, error) `perm:"admin"` DownloadSectorData func(p0 context.Context, p1 storiface.SectorRef, p2 bool, p3 map[storiface.SectorFileType]storiface.SectorLocation) (storiface.CallID, error) `perm:"admin"` @@ -1767,6 +1780,17 @@ func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealPara return nil, ErrNotSupported } +func (s *FullNodeStruct) ClientStatelessDealSxx(p0 context.Context, p1 *StartDealParams) (*marketnetwork.Proposal, error) { + if s.Internal.ClientStatelessDealSxx == nil { + return nil, ErrNotSupported + } + return s.Internal.ClientStatelessDealSxx(p0, p1) +} + +func (s *FullNodeStub) ClientStatelessDealSxx(p0 context.Context, p1 *StartDealParams) (*marketnetwork.Proposal, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { if s.Internal.CreateBackup == nil { return ErrNotSupported @@ -4132,6 +4156,17 @@ func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPo return *new(map[abi.SectorNumber]string), ErrNotSupported } +func (s *StorageMinerStruct) CheckProve(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) { + if s.Internal.CheckProve == nil { + return *new(map[abi.SectorNumber]string), ErrNotSupported + } + return s.Internal.CheckProve(p0, p1, p2, p3, p4) +} + +func (s *StorageMinerStub) CheckProve(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) { + return *new(map[abi.SectorNumber]string), ErrNotSupported +} + func (s *StorageMinerStruct) ComputeDataCid(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 
storiface.Data) (abi.PieceInfo, error) { if s.Internal.ComputeDataCid == nil { return *new(abi.PieceInfo), ErrNotSupported @@ -4330,6 +4365,17 @@ func (s *StorageMinerStub) DealsImportData(p0 context.Context, p1 cid.Cid, p2 st return ErrNotSupported } +func (s *StorageMinerStruct) DealsImportDataOfSxx(p0 context.Context, p1 cid.Cid, p2 string, p3 string) error { + if s.Internal.DealsImportDataOfSxx == nil { + return ErrNotSupported + } + return s.Internal.DealsImportDataOfSxx(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) DealsImportDataOfSxx(p0 context.Context, p1 cid.Cid, p2 string, p3 string) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) DealsList(p0 context.Context) ([]*MarketDeal, error) { if s.Internal.DealsList == nil { return *new([]*MarketDeal), ErrNotSupported @@ -5287,6 +5333,17 @@ func (s *StorageMinerStub) SectorsUpdate(p0 context.Context, p1 abi.SectorNumber return ErrNotSupported } +func (s *StorageMinerStruct) SectorsUpdateOfSxx(p0 context.Context, p1 abi.SectorNumber, p2 SectorState, p3 string) error { + if s.Internal.SectorsUpdateOfSxx == nil { + return ErrNotSupported + } + return s.Internal.SectorsUpdateOfSxx(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) SectorsUpdateOfSxx(p0 context.Context, p1 abi.SectorNumber, p2 SectorState, p3 string) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) StorageAddLocal(p0 context.Context, p1 string) error { if s.Internal.StorageAddLocal == nil { return ErrNotSupported @@ -5595,6 +5652,19 @@ func (s *WalletStub) WalletSign(p0 context.Context, p1 address.Address, p2 []byt return nil, ErrNotSupported } +// add by lin +func (s *WorkerStruct) AddPieceOfSxx(p0 context.Context, p1 storiface.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 string) (storiface.CallID, error) { + if s.Internal.AddPieceOfSxx == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.AddPieceOfSxx(p0, p1, p2, p3, p4) +} + +func (s *WorkerStub) 
AddPieceOfSxx(p0 context.Context, p1 storiface.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 string) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} +// end + func (s *WorkerStruct) AddPiece(p0 context.Context, p1 storiface.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storiface.Data) (storiface.CallID, error) { if s.Internal.AddPiece == nil { return *new(storiface.CallID), ErrNotSupported diff --git a/api/v0api/full.go b/api/v0api/full.go index ca137179410..b89a6f96b39 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -28,6 +28,7 @@ import ( marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" ) //go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode @@ -319,6 +320,7 @@ type FullNode interface { ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write + ClientStatelessDealSxx(ctx context.Context, params *api.StartDealParams) (*network.Proposal, error) //perm:write // ClientGetDealInfo returns the latest information about a given deal. ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read // ClientListDeals returns information about the deals made by the local client. 
diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index b1a07dacc18..602a9f3050b 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -30,6 +30,7 @@ import ( marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" ) var ErrNotSupported = xerrors.New("method not supported") @@ -142,6 +143,8 @@ type FullNodeStruct struct { ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"` + ClientStatelessDealSxx func(p0 context.Context, p1 *api.StartDealParams) (*network.Proposal, error) `perm:"write"` + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` @@ -1056,6 +1059,17 @@ func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDeal return nil, ErrNotSupported } +func (s *FullNodeStruct) ClientStatelessDealSxx(p0 context.Context, p1 *api.StartDealParams) (*network.Proposal, error) { + if s.Internal.ClientStatelessDealSxx == nil { + return nil, ErrNotSupported + } + return s.Internal.ClientStatelessDealSxx(p0, p1) +} + +func (s *FullNodeStub) ClientStatelessDealSxx(p0 context.Context, p1 *api.StartDealParams) (*network.Proposal, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { if s.Internal.CreateBackup == nil { return ErrNotSupported diff --git a/cli/client.go b/cli/client.go index 377505363fb..2b0d2c003ed 100644 --- a/cli/client.go +++ b/cli/client.go @@ -354,6 +354,12 @@ The minimum value is 518400 (6 months).`, Name: "provider-collateral", Usage: "specify the requested provider collateral the miner should put up", }, + // add by pan + &cli.StringFlag{ + Name: "peerid", + Usage: 
"the miner peerid", + }, + // end &CidBaseFlag, }, Action: func(cctx *cli.Context) error { @@ -488,6 +494,16 @@ The minimum value is 518400 (6 months).`, ProviderCollateral: provCol, } + // add by pan + if pidstr := cctx.String("peerid"); pidstr != "" { + peerid, err := peer.Decode(pidstr) + if err != nil { + return err + } + sdParams.Peerid = &peerid + } + // + var proposal *cid.Cid if cctx.Bool("manual-stateless-deal") { if ref.TransferType != storagemarket.TTManual || price.Int64() != 0 { diff --git a/cmd/lotus-miner/info.go b/cmd/lotus-miner/info.go index 9065139ebe0..0faa38c2153 100644 --- a/cmd/lotus-miner/info.go +++ b/cmd/lotus-miner/info.go @@ -413,6 +413,7 @@ func handleMarketsInfo(ctx context.Context, nodeApi api.StorageMiner) error { storagemarket.StorageDealTransferring: {}, storagemarket.StorageDealValidating: {}, storagemarket.StorageDealStaged: {}, + storagemarket.StorageDealStagedOfSxx: {}, storagemarket.StorageDealAwaitingPreCommit: {}, storagemarket.StorageDealSealing: {}, storagemarket.StorageDealPublish: {}, diff --git a/cmd/lotus-miner/main.go b/cmd/lotus-miner/main.go index 3cc796168c8..cb7ba86d3bd 100644 --- a/cmd/lotus-miner/main.go +++ b/cmd/lotus-miner/main.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "os" "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" @@ -165,6 +166,12 @@ func main() { } app.Setup() app.Metadata["repoType"] = repo.StorageMiner + + // add by sxx + os.Setenv("LOTUS_WDPOST", "true") + os.Setenv("LOTUS_WNPOST", "true") + // end + lcli.RunApp(app) } diff --git a/cmd/lotus-miner/market.go b/cmd/lotus-miner/market.go index 706e4923605..fc10c705387 100644 --- a/cmd/lotus-miner/market.go +++ b/cmd/lotus-miner/market.go @@ -361,6 +361,13 @@ var dealsImportDataCmd = &cli.Command{ Name: "import-data", Usage: "Manually import data for a deal", ArgsUsage: " ", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "worker", + Usage: "Set specified worker", + Value: "", + }, + }, Action: func(cctx *cli.Context) 
error { api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { @@ -381,6 +388,12 @@ var dealsImportDataCmd = &cli.Command{ fpath := cctx.Args().Get(1) + worker := cctx.String("worker") + + if os.Getenv("LOTUS_OF_SXX") == "1" { + return api.DealsImportDataOfSxx(ctx, propCid, fpath, worker) + } + return api.DealsImportData(ctx, propCid, fpath) }, diff --git a/cmd/lotus-miner/proving.go b/cmd/lotus-miner/proving.go index 6f6fd663580..2cc9d95a7d1 100644 --- a/cmd/lotus-miner/proving.go +++ b/cmd/lotus-miner/proving.go @@ -38,6 +38,7 @@ var provingCmd = &cli.Command{ provingDeadlineInfoCmd, provingFaultsCmd, provingCheckProvableCmd, + provingVerifyProveCmd, workersCmd(false), provingComputeCmd, provingRecoverFaultsCmd, @@ -600,6 +601,135 @@ var provingCheckProvableCmd = &cli.Command{ }, } +var provingVerifyProveCmd = &cli.Command{ + Name: "verify", + Usage: "verify sectors prove", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "only-bad", + Usage: "print only bad sectors", + Value: false, + }, + &cli.BoolFlag{ + Name: "slow", + Usage: "run slower checks", + }, + &cli.StringFlag{ + Name: "storage-id", + Usage: "filter sectors by storage path (path id)", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("must pass deadline index") + } + + dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) + if err != nil { + return xerrors.Errorf("could not parse deadline index: %w", err) + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + sapi, scloser, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer scloser() + + ctx := lcli.ReqContext(cctx) + + addr, err := sapi.ActorAddress(ctx) + if err != nil { + return err + } + + mid, err := address.IDFromAddress(addr) + if err != nil { + return err + } + + info, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK) + if err != nil { + return err + } + + partitions, err := 
api.StateMinerPartitions(ctx, addr, dlIdx, types.EmptyTSK) + if err != nil { + return err + } + + tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintln(tw, "deadline\tpartition\tsector\tstatus") + + var filter map[abi.SectorID]struct{} + + if cctx.IsSet("storage-id") { + sl, err := sapi.StorageList(ctx) + if err != nil { + return err + } + decls := sl[storiface.ID(cctx.String("storage-id"))] + + filter = map[abi.SectorID]struct{}{} + for _, decl := range decls { + filter[decl.SectorID] = struct{}{} + } + } + + for parIdx, par := range partitions { + sectors := make(map[abi.SectorNumber]struct{}) + + sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK) + if err != nil { + return err + } + + var tocheck []storiface.SectorRef + var update []bool + for _, info := range sectorInfos { + si := abi.SectorID{ + Miner: abi.ActorID(mid), + Number: info.SectorNumber, + } + + if filter != nil { + if _, found := filter[si]; !found { + continue + } + } + + sectors[info.SectorNumber] = struct{}{} + tocheck = append(tocheck, storiface.SectorRef{ + ProofType: info.SealProof, + ID: si, + }) + update = append(update, info.SectorKeyCID != nil) + } + + bad, err := sapi.CheckProve(ctx, info.WindowPoStProofType, tocheck, update, cctx.Bool("slow")) + if err != nil { + return err + } + + for s := range sectors { + if err, exist := bad[s]; exist { + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.RedString("bad")+fmt.Sprintf(" (%s)", err)) + } else if !cctx.Bool("only-bad") { + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%s\n", dlIdx, parIdx, s, color.GreenString("good")) + } + } + } + + return tw.Flush() + }, +} + var provingComputeCmd = &cli.Command{ Name: "compute", Usage: "Compute simulated proving tasks", diff --git a/cmd/lotus-miner/run.go b/cmd/lotus-miner/run.go index 93dfea2fc4d..546be4b4aed 100644 --- a/cmd/lotus-miner/run.go +++ b/cmd/lotus-miner/run.go @@ -4,6 +4,7 @@ import ( "fmt" _ "net/http/pprof" "os" + 
"path/filepath" "github.com/multiformats/go-multiaddr" "github.com/urfave/cli/v2" @@ -23,6 +24,8 @@ import ( "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" + + scServer "git.sxxfuture.net/external-archive/github/moran666666/sector-counter/server" ) var runCmd = &cli.Command{ @@ -47,8 +50,62 @@ var runCmd = &cli.Command{ Usage: "manage open file limit", Value: true, }, + // add by sxx + &cli.BoolFlag{ + Name: "wdpost", + Usage: "enable windowPoSt", + Value: false, + }, + &cli.BoolFlag{ + Name: "wnpost", + Usage: "enable winningPoSt", + Value: false, + }, + &cli.StringFlag{ + Name: "sctype", + Usage: "sector counter type(alloce,get)", + Value: "", + }, + &cli.StringFlag{ + Name: "sclisten", + Usage: "host address and port the sector counter will listen on", + Value: "", + }, + // end }, Action: func(cctx *cli.Context) error { + // add by sxx + if cctx.Bool("wdpost") { + os.Setenv("LOTUS_WDPOST", "true") + } else { + os.Unsetenv("LOTUS_WDPOST") + } + + if cctx.Bool("wnpost") { + os.Setenv("LOTUS_WNPOST", "true") + } else { + os.Unsetenv("LOTUS_WNPOST") + } + + scType := cctx.String("sctype") + if scType == "alloce" || scType == "get" { + os.Setenv("SC_TYPE", scType) + + scListen := cctx.String("sclisten") + if scListen == "" { + log.Errorf("sclisten must be set") + return nil + } + os.Setenv("SC_LISTEN", scListen) + + if scType == "alloce" { + scFilePath := filepath.Join(cctx.String(FlagMinerRepo), "sectorid") + go scServer.Run(scFilePath) + } + } else { + os.Unsetenv("SC_TYPE") + } + // end if !cctx.Bool("enable-gpu-proving") { err := os.Setenv("BELLMAN_NO_GPU", "true") if err != nil { diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 44bce55bc92..f07d22e8aa5 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -35,6 +35,7 @@ import ( "github.com/filecoin-project/lotus/lib/strle" 
"github.com/filecoin-project/lotus/lib/tablewriter" sealing "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/sealer/storiface" ) var sectorsCmd = &cli.Command{ @@ -68,6 +69,14 @@ var sectorsCmd = &cli.Command{ var sectorsPledgeCmd = &cli.Command{ Name: "pledge", Usage: "store random data in a sector", + // add by pan + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "worker", + Value: "", + }, + }, + // end Action: func(cctx *cli.Context) error { minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -81,6 +90,20 @@ var sectorsPledgeCmd = &cli.Command{ return err } + // add by pan + worker := cctx.String("worker") + if worker != "" { + minerpath := os.Getenv("LOTUS_MINER_PATH") + path := minerpath + "/sectors" + _, err = os.Stat(path) + if os.IsNotExist(err) { + err = os.Mkdir(path, 0755) + } + path = path + "/" + storiface.SectorName(id) + err = os.WriteFile(path, []byte(worker), 0666) + } + // end + fmt.Println("Created CC sector: ", id.Number) return nil @@ -1740,7 +1763,9 @@ var sectorsUpdateCmd = &cli.Command{ return nil } - return minerAPI.SectorsUpdate(ctx, abi.SectorNumber(id), api.SectorState(cctx.Args().Get(1))) + fmt.Printf("new worker :%+v", cctx.Args().Get(2)) + // return minerAPI.SectorsUpdate(ctx, abi.SectorNumber(id), api.SectorState(cctx.Args().Get(1))) + return minerAPI.SectorsUpdateOfSxx(ctx, abi.SectorNumber(id), api.SectorState(cctx.Args().Get(1)), cctx.Args().Get(2)) }, } diff --git a/config.toml b/config.toml new file mode 100644 index 00000000000..10d92305a6d --- /dev/null +++ b/config.toml @@ -0,0 +1,227 @@ +[API] + # Binding address for the Lotus API + # + # type: string + # env var: LOTUS_API_LISTENADDRESS + #ListenAddress = "/ip4/127.0.0.1/tcp/1234/http" + + # type: string + # env var: LOTUS_API_REMOTELISTENADDRESS + #RemoteListenAddress = "" + + # type: Duration + # env var: LOTUS_API_TIMEOUT + #Timeout = "30s" + + +[Backup] + # When set to true disables metadata log 
(.lotus/kvlog). This can save disk + # space by reducing metadata redundancy. + # + # Note that in case of metadata corruption it might be much harder to recover + # your node if metadata log is disabled + # + # type: bool + # env var: LOTUS_BACKUP_DISABLEMETADATALOG + #DisableMetadataLog = true + + +[Logging] + [Logging.SubsystemLevels] + # env var: LOTUS_LOGGING_SUBSYSTEMLEVELS_EXAMPLE-SUBSYSTEM + #example-subsystem = "INFO" + + +[Libp2p] + # Binding address for the libp2p host - 0 means random port. + # Format: multiaddress; see https://multiformats.io/multiaddr/ + # + # type: []string + # env var: LOTUS_LIBP2P_LISTENADDRESSES + #ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"] + + # Addresses to explicitly announce to other peers. If not specified, + # all interface addresses are announced + # Format: multiaddress + # + # type: []string + # env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES + #AnnounceAddresses = [] + + # Addresses to not announce + # Format: multiaddress + # + # type: []string + # env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES + #NoAnnounceAddresses = [] + + # When not disabled (default), lotus asks NAT devices (e.g., routers), to + # open up an external port and forward it to the port lotus is running on. + # When this works (i.e., when your router supports NAT port forwarding), + # it makes the local lotus node accessible from the public internet + # + # type: bool + # env var: LOTUS_LIBP2P_DISABLENATPORTMAP + #DisableNatPortMap = false + + # ConnMgrLow is the number of connections that the basic connection manager + # will trim down to. + # + # type: uint + # env var: LOTUS_LIBP2P_CONNMGRLOW + #ConnMgrLow = 150 + + # ConnMgrHigh is the number of connections that, when exceeded, will trigger + # a connection GC operation. Note: protected/recently formed connections don't + # count towards this limit.
+ # + # type: uint + # env var: LOTUS_LIBP2P_CONNMGRHIGH + #ConnMgrHigh = 180 + + # ConnMgrGrace is a time duration that new connections are immune from being + # closed by the connection manager. + # + # type: Duration + # env var: LOTUS_LIBP2P_CONNMGRGRACE + #ConnMgrGrace = "20s" + + +[Pubsub] + # Run the node in bootstrap-node mode + # + # type: bool + # env var: LOTUS_PUBSUB_BOOTSTRAPPER + #Bootstrapper = false + + # type: string + # env var: LOTUS_PUBSUB_REMOTETRACER + #RemoteTracer = "" + + +[Client] + # type: bool + # env var: LOTUS_CLIENT_USEIPFS + #UseIpfs = false + + # type: bool + # env var: LOTUS_CLIENT_IPFSONLINEMODE + #IpfsOnlineMode = false + + # type: string + # env var: LOTUS_CLIENT_IPFSMADDR + #IpfsMAddr = "" + + # type: bool + # env var: LOTUS_CLIENT_IPFSUSEFORRETRIEVAL + #IpfsUseForRetrieval = false + + # The maximum number of simultaneous data transfers between the client + # and storage providers for storage deals + # + # type: uint64 + # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE + #SimultaneousTransfersForStorage = 20 + + # The maximum number of simultaneous data transfers between the client + # and storage providers for retrieval deals + # + # type: uint64 + # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL + #SimultaneousTransfersForRetrieval = 20 + + # Require that retrievals perform no on-chain operations. Paid retrievals + # without existing payment channels with available funds will fail instead + # of automatically performing on-chain operations. 
+ # + # type: bool + # env var: LOTUS_CLIENT_OFFCHAINRETRIEVAL + #OffChainRetrieval = false + + +[Wallet] + # type: string + # env var: LOTUS_WALLET_REMOTEBACKEND + #RemoteBackend = "" + + # type: bool + # env var: LOTUS_WALLET_ENABLELEDGER + #EnableLedger = false + + # type: bool + # env var: LOTUS_WALLET_DISABLELOCAL + #DisableLocal = false + + +[Fees] + # type: types.FIL + # env var: LOTUS_FEES_DEFAULTMAXFEE + #DefaultMaxFee = "0.07 FIL" + + +[Chainstore] + # type: bool + # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE + #EnableSplitstore = false + + [Chainstore.Splitstore] + # ColdStoreType specifies the type of the coldstore. + # It can be "universal" (default) or "discard" for discarding cold blocks. + # + # type: string + # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE + #ColdStoreType = "universal" + + # HotStoreType specifies the type of the hotstore. + # Only currently supported value is "badger". + # + # type: string + # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTORETYPE + #HotStoreType = "badger" + + # MarkSetType specifies the type of the markset. + # It can be "map" for in memory marking or "badger" (default) for on-disk marking. + # + # type: string + # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE + #MarkSetType = "badger" + + # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond + # the compaction boundary; default is 0. + # + # type: uint64 + # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMESSAGERETENTION + #HotStoreMessageRetention = 0 + + # HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. + # A value of 0 disables, while a value 1 will do full GC in every compaction. + # Default is 20 (about once a week). + # + # type: uint64 + # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY + #HotStoreFullGCFrequency = 20 + + # EnableColdStoreAutoPrune turns on compaction of the cold store i.e. 
pruning + # where hotstore compaction occurs every finality epochs pruning happens every 3 finalities + # Default is false + # + # type: bool + # env var: LOTUS_CHAINSTORE_SPLITSTORE_ENABLECOLDSTOREAUTOPRUNE + #EnableColdStoreAutoPrune = false + + # ColdStoreFullGCFrequency specifies how often to performa a full (moving) GC on the coldstore. + # Only applies if auto prune is enabled. A value of 0 disables while a value of 1 will do + # full GC in every prune. + # Default is 7 (about once every a week) + # + # type: uint64 + # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTOREFULLGCFREQUENCY + #ColdStoreFullGCFrequency = 7 + + # ColdStoreRetention specifies the retention policy for data reachable from the chain, in + # finalities beyond the compaction boundary, default is 0, -1 retains everything + # + # type: int64 + # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORERETENTION + #ColdStoreRetention = 0 + diff --git a/devgen.car b/devgen.car new file mode 100644 index 00000000000..e35ada1ad69 Binary files /dev/null and b/devgen.car differ diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/.circleci/config.yml b/extern/sxx-go-fil-markets@v1.24.0-v17/.circleci/config.yml new file mode 100644 index 00000000000..04eb6b1282d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/.circleci/config.yml @@ -0,0 +1,198 @@ +version: 2.1 +orbs: + go: gotest/tools@0.0.9 + codecov: codecov/codecov@1.0.2 + +executors: + golang: + docker: + - image: cimg/go:1.17.9 + resource_class: large + +commands: + install-deps: + steps: + - go/install-ssh + - go/install: {package: git} + prepare: + parameters: + linux: + default: true + description: is a linux build environment? 
+ type: boolean + steps: + - checkout + - when: + condition: << parameters.linux >> + steps: + - run: sudo apt-get update + - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev + - run: git submodule sync + - run: git submodule update --init + build-all: + + +jobs: + mod-tidy-check: + executor: golang + steps: + - install-deps + - prepare + - go/mod-download + - go/mod-tidy-check + + cbor-gen-check: + executor: golang + steps: + - install-deps + - prepare + - run: go install golang.org/x/tools/cmd/goimports + - run: go install github.com/hannahhoward/cbor-gen-for + - run: go generate ./... + - run: git --no-pager diff + - run: git --no-pager diff --quiet + + + docs-check: + executor: golang + steps: + - install-deps + - prepare + - run: make docsgen + - run: git --no-pager diff + - run: git --no-pager diff --quiet + + imports-check: + executor: golang + steps: + - install-deps + - prepare + - run: go install golang.org/x/tools/cmd/goimports + - run: scripts/fiximports + - run: git --no-pager diff + - run: git --no-pager diff --quiet + + build-all: + executor: golang + steps: + - install-deps + - prepare + - go/mod-download + - run: sudo apt-get update + - restore_cache: + name: restore go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go-fil-markets/go.mod" }} + - run: + command: make build + - store_artifacts: + path: go-fil-markets + - store_artifacts: + path: go-fil-markets + + test: &test + description: | + Run tests with gotestsum. + parameters: + executor: + type: executor + default: golang + test-suite-name: + type: string + default: unit + description: Test suite name to report to CircleCI. + codecov-upload: + type: boolean + default: true + description: | + Upload coverage report to https://codecov.io/. Requires the codecov API token to be + set as an environment variable for private projects. 
+ executor: << parameters.executor >> + steps: + - install-deps + - prepare + - go/mod-download + - restore_cache: + name: restore go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + - go/install-gotestsum: + gobin: $HOME/.local/bin + - run: + name: make test + environment: + GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml + command: | + mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> + make test + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - when: + condition: << parameters.codecov-upload >> + steps: + - go/install: {package: bash} + - go/install: {package: curl} + - run: + shell: /bin/bash -eo pipefail + command: | + bash <(curl -s https://codecov.io/bash) + - save_cache: + name: save go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + paths: + - "~/go/pkg" + - "~/go/src/github.com" + - "~/go/src/golang.org" + + lint: &lint + description: | + Run golangci-lint. + parameters: + executor: + type: executor + default: golang + golangci-lint-version: + type: string + default: 1.21.0 + concurrency: + type: string + default: '2' + description: | + Concurrency used to run linters. Defaults to 2 because NumCPU is not + aware of container CPU limits. 
+ args: + type: string + default: '' + description: | + Arguments to pass to golangci-lint + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make build + - go/install-golangci-lint: + gobin: $HOME/.local/bin + version: << parameters.golangci-lint-version >> + - run: + name: Lint + command: | + $HOME/.local/bin/golangci-lint run -v \ + --concurrency << parameters.concurrency >> << parameters.args >> + lint-changes: + <<: *lint + + lint-all: + <<: *lint + +workflows: + version: 2.1 + ci: + jobs: + - lint-changes: + args: "--new-from-rev origin/master" + - test + - mod-tidy-check + - cbor-gen-check + - docs-check + - imports-check + - build-all diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/.gitignore b/extern/sxx-go-fil-markets@v1.24.0-v17/.gitignore new file mode 100644 index 00000000000..2c9ad437de7 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/.gitignore @@ -0,0 +1,22 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +filestore/_test/ + +# JetBrains +.idea + +.filecoin-build +.update-modules + +# NPM +node_modules/ diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/.gitmodules b/extern/sxx-go-fil-markets@v1.24.0-v17/.gitmodules new file mode 100644 index 00000000000..773dea9715d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/.gitmodules @@ -0,0 +1,3 @@ +[submodule "extern/filecoin-ffi"] + path = extern/filecoin-ffi + url = https://github.com/filecoin-project/filecoin-ffi.git diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/CHANGELOG.md b/extern/sxx-go-fil-markets@v1.24.0-v17/CHANGELOG.md new file mode 100644 index 00000000000..27bd98653ac --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/CHANGELOG.md @@ -0,0 +1,1766 @@ +# go-fil-markets changelog + +# go-fil-markets v1.24.0 + +- github.com/filecoin-project/go-fil-markets: + - 
Update go libp2p v0.21 (#744) ([filecoin-project/go-fil-markets#744](https://github.com/filecoin-project/go-fil-markets/pull/744)) + - feat(retrievalmarket): use ready manager (#739) ([filecoin-project/go-fil-markets#739](https://github.com/filecoin-project/go-fil-markets/pull/739)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Marco Munizaga | 1 | +133/-90 | 11 | +| Hannah Howard | 1 | +4/-4 | 1 | + +# go-fil-markets v1.23.2 + +- github.com/filecoin-project/go-fil-markets: + - feat: dagstore_wrapper: DestroyShardSync (#736) ([filecoin-project/go-fil-markets#736](https://github.com/filecoin-project/go-fil-markets/pull/736)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| LexLuthr | 1 | +35/-1 | 3 | + +# go-fil-markets v1.23.1 + +- github.com/filecoin-project/go-fil-markets: + - Upgrade to `go-car` `2.4.1` (#733) ([filecoin-project/go-fil-markets#733](https://github.com/filecoin-project/go-fil-markets/pull/733)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Masih H. Derkani | 1 | +3/-3 | 2 | + +# go-fil-markets v1.23.0 + +- github.com/filecoin-project/go-fil-markets: + - feat: update deps (#731) ([filecoin-project/go-fil-markets#731](https://github.com/filecoin-project/go-fil-markets/pull/731)) + - Upgrade to go-ipfs-blockstore `v1.2.0` (#728) ([filecoin-project/go-fil-markets#728](https://github.com/filecoin-project/go-fil-markets/pull/728)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Masih H. 
Derkani | 2 | +39/-19 | 4 | +| dirkmc | 1 | +37/-13 | 5 | + +# go-fil-markets v1.22.2 + +- github.com/filecoin-project/go-fil-markets: + - Upgrade to go-ipfs-blockstore `v1.2.0` (#728) ([filecoin-project/go-fil-markets#728](https://github.com/filecoin-project/go-fil-markets/pull/728)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Masih H. Derkani | 1 | +28/-19 | 3 | + +# go-fil-markets v1.22.1 + +- github.com/filecoin-project/go-fil-markets: + - Upgrade index-provider and related dependencies (#722) ([filecoin-project/go-fil-markets#722](https://github.com/filecoin-project/go-fil-markets/pull/722)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Masih H. Derkani | 1 | +2378/-1841 | 15 | + +# go-fil-markets v1.22.0 + +- github.com/filecoin-project/go-fil-markets: + - migrate provider deal proposal label to the new format (#721) ([filecoin-project/go-fil-markets#721](https://github.com/filecoin-project/go-fil-markets/pull/721)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +1517/-58 | 17 | + +# go-fil-markets v1.21.0 + +- github.com/filecoin-project/go-fil-markets: + - Use new go-state-types state accessors (#711) (#719) ([filecoin-project/go-fil-markets#719](https://github.com/filecoin-project/go-fil-markets/pull/719)) + - chore: update go-car dependency (#709) ([filecoin-project/go-fil-markets#709](https://github.com/filecoin-project/go-fil-markets/pull/709)) + - fix:close ask/dealstatus stream (#710) ([filecoin-project/go-fil-markets#710](https://github.com/filecoin-project/go-fil-markets/pull/710)) + - fix: fire storage deal provider ready event after startup completes (#708) ([filecoin-project/go-fil-markets#708](https://github.com/filecoin-project/go-fil-markets/pull/708)) + - release: v1.20.3 (#703) 
([filecoin-project/go-fil-markets#703](https://github.com/filecoin-project/go-fil-markets/pull/703)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 3 | +113/-121 | 39 | +| Rod Vagg | 1 | +3/-3 | 2 | +| Mike | 1 | +2/-0 | 1 | + +# go-fil-markets v1.20.3 +- github.com/filecoin-project/go-fil-markets: + - validate deal proposal (#702) ([filecoin-project/go-fil-markets#702](https://github.com/filecoin-project/go-fil-markets/pull/702)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Anton Evangelatov | 1 | +8/-0 | 1 | + +# go-fil-markets v1.20.1 +- github.com/filecoin-project/go-fil-markets: + - Upgrade index-provider with the latest metadata format (#688) ([filecoin-project/go-fil-markets#688](https://github.com/filecoin-project/go-fil-markets/pull/688)) + - release: v1.20.0 ([filecoin-project/go-fil-markets#686](https://github.com/filecoin-project/go-fil-markets/pull/686)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Masih H. 
Derkani | 1 | +34/-28 | 4 | + +# go-fil-markets v1.20.0 +- github.com/filecoin-project/go-fil-markets: + - Index publishing work (#673) ([filecoin-project/go-fil-markets#673](https://github.com/filecoin-project/go-fil-markets/pull/673)) + - Merge branch 'release/v1.19.1' + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aarsh Shah | 1 | +890/-219 | 22 | + +# go-fil-markets v1.19.1 + +- github.com/filecoin-project/go-fil-markets: + - feat(clientstates): handle payment chanel ready to go (#677) ([filecoin-project/go-fil-markets#677](https://github.com/filecoin-project/go-fil-markets/pull/677)) + - release: v1.19.0 ([filecoin-project/go-fil-markets#672](https://github.com/filecoin-project/go-fil-markets/pull/672)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +23/-5 | 6 | + +# go-fil-markets v1.19.0 + +- github.com/filecoin-project/go-fil-markets: + - Update to go-data-transfer v1.14.0 / go-libp2p v0.18.0-rc1 (#669) ([filecoin-project/go-fil-markets#669](https://github.com/filecoin-project/go-fil-markets/pull/669)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +163/-119 | 9 | +| dirkmc | 1 | +20/-0 | 1 | + +# go-fil-markets v1.18.0 + +Reverts the following commits: +- github.com/filecoin-project/go-fil-markets: + - log advertisement cid for announcement and update deps (#667) ([filecoin-project/go-fil-markets#667](https://github.com/filecoin-project/go-fil-markets/pull/667)) + - release: v1.15.0 ([filecoin-project/go-fil-markets#661](https://github.com/filecoin-project/go-fil-markets/pull/661)) + - retrieve by any CID (not just root CID) and reference provider integration (#629) ([filecoin-project/go-fil-markets#629](https://github.com/filecoin-project/go-fil-markets/pull/629)) + +The revert 
commit is: +- github.com/filecoin-project/go-fil-markets: + - revert index provider PR #629 and associated PRs (#670) ([filecoin-project/go-fil-markets#670](https://github.com/filecoin-project/go-fil-markets/pull/670)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +1038/-1053 | 45 | +| Aarsh Shah | 1 | +23/-19 | 5 | +| Dirk McCormick | 1 | +11/-0 | 1 | + +# go-fil-markets v1.14.1 + +- github.com/filecoin-project/go-fil-markets: + - refactor: isolate commp in its own package to avoid exposing pulling filecoin-ffi (#659) ([filecoin-project/go-fil-markets#659](https://github.com/filecoin-project/go-fil-markets/pull/659)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 1 | +5/-4 | 3 | + +# go-fil-markets v1.14.0 + +- github.com/filecoin-project/go-fil-markets: + - add SeekStart method to inflator reader (#656) ([filecoin-project/go-fil-markets#656](https://github.com/filecoin-project/go-fil-markets/pull/656)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 1 | +317/-52 | 10 | + +# go-fil-markets v1.13.6 + +Add timeout for awaiting restart + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Timeout provider after waiting a period of time for transfer to restart (#655) ([filecoin-project/go-fil-markets#655](https://github.com/filecoin-project/go-fil-markets/pull/655)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +204/-86 | 10 | + +# go-fil-markets v1.13.5 + +Update to context data stores + +# go-fil-markets 1.13.4 + +- github.com/filecoin-project/go-fil-markets: + - Update go-car to v2.1.0 (#650) 
([filecoin-project/go-fil-markets#650](https://github.com/filecoin-project/go-fil-markets/pull/650)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Łukasz Magiera | 1 | +1670/-6 | 4 | + +# go-fil-markets 1.13.3 + +- github.com/filecoin-project/go-fil-markets: + - Fix network error log (#643) ([filecoin-project/go-fil-markets#643](https://github.com/filecoin-project/go-fil-markets/pull/643)) +- github.com/filecoin-project/go-data-transfer (v1.11.1 -> v1.11.4): + - fix: clear error message on channel open after restart (#273) ([filecoin-project/go-data-transfer#273](https://github.com/filecoin-project/go-data-transfer/pull/273)) + - fix: flaky TestAutoRestartAfterBouncingInitiator (sleep before starting new initiator) (#275) ([filecoin-project/go-data-transfer#275](https://github.com/filecoin-project/go-data-transfer/pull/275)) + - fix: check channel cancel on pause / resume (#271) ([filecoin-project/go-data-transfer#271](https://github.com/filecoin-project/go-data-transfer/pull/271)) + - fix: startup channel monitor when a channel is restarted (#269) ([filecoin-project/go-data-transfer#269](https://github.com/filecoin-project/go-data-transfer/pull/269)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 7 | +297/-18 | 14 | +| Dirk McCormick | 3 | +35/-0 | 3 | +| Aarsh Shah | 1 | +1/-1 | 1 | + +# go-fil-markets 1.13.2 + +- github.com/filecoin-project/go-fil-markets: + - block in Publish state when message sending fails due to lack of funds (#638) ([filecoin-project/go-fil-markets#638](https://github.com/filecoin-project/go-fil-markets/pull/638)) + - Standardize definition of a recursive non-matching selector (#640) ([filecoin-project/go-fil-markets#640](https://github.com/filecoin-project/go-fil-markets/pull/640)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | 
+|-------------|---------|---------|---------------| +| Peter Rabbitson | 1 | +42/-48 | 14 | +| Anton Evangelatov | 1 | +21/-0 | 4 | + +# go-fil-markets 1.13.1 + +- github.com/filecoin-project/go-fil-markets: + - feat: upgrade to go-data-transfer v1.11.1 (#636) ([filecoin-project/go-fil-markets#636](https://github.com/filecoin-project/go-fil-markets/pull/636)) +- github.com/filecoin-project/go-data-transfer (v1.11.0 -> v1.11.1): + - feat: update to go-graphsync v0.10.0 (#263) ([filecoin-project/go-data-transfer#263](https://github.com/filecoin-project/go-data-transfer/pull/263)) +- github.com/ipfs/go-graphsync (v0.10.0-rc3 -> v0.10.0): + - feat: update to go-ipld-prime v0.12.3 (#237) ([ipfs/go-graphsync#237](https://github.com/ipfs/go-graphsync/pull/237)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Dirk McCormick | 2 | +33/-0 | 2 | +| dirkmc | 3 | +14/-16 | 7 | + +# go-fil-markets 1.13.0 + +- github.com/filecoin-project/go-fil-markets: + - commp: expose MaxTraversalLinks option for generating CommP (#633) ([filecoin-project/go-fil-markets#633](https://github.com/filecoin-project/go-fil-markets/pull/633)) + - fix: retrieval not found error (#630) ([filecoin-project/go-fil-markets#630](https://github.com/filecoin-project/go-fil-markets/pull/630)) +- github.com/filecoin-project/go-data-transfer (v1.10.0 -> v1.11.0): + - feat: update to go-ipld-prime v0.12.3 (#261) ([filecoin-project/go-data-transfer#261](https://github.com/filecoin-project/go-data-transfer/pull/261)) + - refactor: remove libp2p protocol cache (#259) ([filecoin-project/go-data-transfer#259](https://github.com/filecoin-project/go-data-transfer/pull/259)) + - feat: update to graphsync v0.10.0-rc3 (#258) ([filecoin-project/go-data-transfer#258](https://github.com/filecoin-project/go-data-transfer/pull/258)) + - Use do-not-send-first-blocks extension for restarts (#257) 
([filecoin-project/go-data-transfer#257](https://github.com/filecoin-project/go-data-transfer/pull/257)) + - Fix parallel transfers between same two peers (#254) ([filecoin-project/go-data-transfer#254](https://github.com/filecoin-project/go-data-transfer/pull/254)) +- github.com/ipfs/go-graphsync (v0.9.0 -> v0.10.0-rc3): + - Do not send first blocks extension (#230) ([ipfs/go-graphsync#230](https://github.com/ipfs/go-graphsync/pull/230)) + - Protect Libp2p Connections (#229) ([ipfs/go-graphsync#229](https://github.com/ipfs/go-graphsync/pull/229)) + - test(responsemanager): remove check (#228) ([ipfs/go-graphsync#228](https://github.com/ipfs/go-graphsync/pull/228)) + - feat(graphsync): give missing blocks a named error (#227) ([ipfs/go-graphsync#227](https://github.com/ipfs/go-graphsync/pull/227)) + - Add request limits (#224) ([ipfs/go-graphsync#224](https://github.com/ipfs/go-graphsync/pull/224)) + - Tech Debt Cleanup and Docs Update (#219) ([ipfs/go-graphsync#219](https://github.com/ipfs/go-graphsync/pull/219)) + - fix(requestmanager): remove main thread block on allocation (#216) ([ipfs/go-graphsync#216](https://github.com/ipfs/go-graphsync/pull/216)) + - feat(allocator): add debug logging (#213) ([ipfs/go-graphsync#213](https://github.com/ipfs/go-graphsync/pull/213)) + - fix: spurious warn log (#210) ([ipfs/go-graphsync#210](https://github.com/ipfs/go-graphsync/pull/210)) + - fix(message): fix dropping of response extensions (#211) ([ipfs/go-graphsync#211](https://github.com/ipfs/go-graphsync/pull/211)) + - docs(CHANGELOG): update change log ([ipfs/go-graphsync#208](https://github.com/ipfs/go-graphsync/pull/208)) + - docs(README): add notice about branch rename + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 14 | +3315/-2461 | 98 | +| dirkmc | 7 | +837/-363 | 51 | +| hannahhoward | 8 | +138/-3 | 9 | +| Dirk McCormick | 2 | +38/-3 | 2 | + +# go-fil-markets 1.12.0 + 
+Reinstate update to graphsync v0.9.0 with new Linksystem IPLD prime + +# go-fil-markets v1.11.0 + +- Remove go-multistore dependency +- update go-data-transfer to v1.9.0 +- Revert "Pass deal proposal instead of deal ID to OnDealExpiredOrSlashed (#476) ([filecoin-project/go-fil-markets#476](https://github.com/filecoin-project/go-fil-markets/pull/476))" + +# go-fil-markets v1.9.0 + +- "Pass deal proposal instead of deal ID to OnDealExpiredOrSlashed (#616) ([filecoin-project/go-fil-markets#616](https://github.com/filecoin-project/go-fil-markets/pull/616))" + +# go-fil-markets v1.8.0 + +- Migrate to DAG store + CARv2 blockstores for storage and retrieval (#576) ([filecoin-project/go-fil-markets#576](https://github.com/filecoin-project/go-fil-markets/pull/576)) + +# go-fil-markets v1.7.0 + +- Pass deal proposal instead of deal ID to OnDealExpiredOrSlashed (#476) ([filecoin-project/go-fil-markets#476](https://github.com/filecoin-project/go-fil-markets/pull/476)) + +# go-fil-markets v1.6.0 + +- Add a `StorageDealTransferQueued` event on the storage client that means that the data transfer request has been queued and will be executed soon. +- Support padding out smaller files (https://github.com/filecoin-project/go-fil-markets/pull/536). +- Storage Client peer and the peer that actually does the data transfer can be different (https://github.com/filecoin-project/go-fil-markets/pull/585). 
+ +# go-fil-markets v1.5.0 + +Adds Dynamic Retrieval Pricing + +- github.com/filecoin-project/go-fil-markets: + - Dynamic Retrieval Pricing (#542) ([filecoin-project/go-fil-markets#542](https://github.com/filecoin-project/go-fil-markets/pull/542)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aarsh Shah | 1 | +1224/-130 | 17 | +| dirkmc | 1 | +18/-0 | 1 | + +# go-fil-markets v1.4.0 + +Generating Changelog for github.com/filecoin-project/go-fil-markets v1.3.0..3511c5617142836e369e31890f544bdd574d970f +- github.com/filecoin-project/go-fil-markets: + - Update to go data transfer v1.6.0 (#550) ([filecoin-project/go-fil-markets#550](https://github.com/filecoin-project/go-fil-markets/pull/550)) + - fix first make error (#548) ([filecoin-project/go-fil-markets#548](https://github.com/filecoin-project/go-fil-markets/pull/548)) +- github.com/filecoin-project/go-data-transfer (v1.5.0 -> v1.6.0): + - fix: option to disable accept and complete timeouts + - fix: disable restart ack timeout + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Dirk McCormick | 3 | +53/-105 | 6 | +| dirkmc | 2 | +53/-33 | 10 | +| Jack Yao | 1 | +1/-1 | 1 | + +# go-fil-markets v1.3.0 + +- github.com/filecoin-project/go-fil-markets: + - fix restarts during data transfer for a retrieval deal (#540) ([filecoin-project/go-fil-markets#540](https://github.com/filecoin-project/go-fil-markets/pull/540)) + - Test Retrieval for offline deals (#541) ([filecoin-project/go-fil-markets#541](https://github.com/filecoin-project/go-fil-markets/pull/541)) + - Allow anonymous submodule checkout (#535) ([filecoin-project/go-fil-markets#535](https://github.com/filecoin-project/go-fil-markets/pull/535)) +- github.com/filecoin-project/go-data-transfer (v1.4.3 -> v1.5.0): + - Add isRestart param to validators (#197) 
([filecoin-project/go-data-transfer#197](https://github.com/filecoin-project/go-data-transfer/pull/197)) + - fix: flaky TestChannelMonitorAutoRestart (#198) ([filecoin-project/go-data-transfer#198](https://github.com/filecoin-project/go-data-transfer/pull/198)) + - Channel monitor watches for errors instead of measuring data rate (#190) ([filecoin-project/go-data-transfer#190](https://github.com/filecoin-project/go-data-transfer/pull/190)) + - fix: prevent concurrent restarts for same channel (#195) ([filecoin-project/go-data-transfer#195](https://github.com/filecoin-project/go-data-transfer/pull/195)) + - fix: channel state machine event handling (#194) ([filecoin-project/go-data-transfer#194](https://github.com/filecoin-project/go-data-transfer/pull/194)) + - Dont double count data sent (#185) ([filecoin-project/go-data-transfer#185](https://github.com/filecoin-project/go-data-transfer/pull/185)) +- github.com/ipfs/go-graphsync (v0.6.0 -> v0.6.1): + - feat: fire network error when network disconnects during request (#164) ([ipfs/go-graphsync#164](https://github.com/ipfs/go-graphsync/pull/164)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 9 | +2595/-1346 | 70 | +| Aarsh Shah | 1 | +226/-71 | 1 | +| Dirk McCormick | 2 | +32/-0 | 2 | +| Peter Rabbitson | 1 | +1/-1 | 1 | + +# go-fil-markets v1.2.5 + +- github.com/filecoin-project/go-fil-markets: + - add timeout for sending cancel message to peer when retrieval cancelled (#531) ([filecoin-project/go-fil-markets#531](https://github.com/filecoin-project/go-fil-markets/pull/531)) + - Flush out & fix retrieval bugs (#525) ([filecoin-project/go-fil-markets#525](https://github.com/filecoin-project/go-fil-markets/pull/525)) + - fix: use time-based deal ID instead of stored counter (#529) ([filecoin-project/go-fil-markets#529](https://github.com/filecoin-project/go-fil-markets/pull/529)) +- github.com/filecoin-project/go-data-transfer 
(v1.4.1 -> v1.4.2): + - Support no-op error responses (#186) ([filecoin-project/go-data-transfer#186](https://github.com/filecoin-project/go-data-transfer/pull/186)) + - fix: fail a pull channel when there is a timeout receiving the Complete message (#179) ([filecoin-project/go-data-transfer#179](https://github.com/filecoin-project/go-data-transfer/pull/179)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aarsh Shah | 2 | +293/-364 | 16 | +| dirkmc | 4 | +206/-149 | 12 | +| Dirk McCormick | 1 | +13/-0 | 1 | + +# go-fil-markets v1.2.4 + +- github.com/filecoin-project/go-fil-markets: + - feat: update to go-data-transfer v1.4.1 (#523) ([filecoin-project/go-fil-markets#523](https://github.com/filecoin-project/go-fil-markets/pull/523)) + - Poll Provider for acceptance only till (deal start epoch + grace period) has elapsed (#516) ([filecoin-project/go-fil-markets#516](https://github.com/filecoin-project/go-fil-markets/pull/516)) + - Add DealStages to track and log Deal status updates (#502) ([filecoin-project/go-fil-markets#502](https://github.com/filecoin-project/go-fil-markets/pull/502)) +- github.com/filecoin-project/go-data-transfer (v1.4.0 -> v1.4.1): + - Add ChannelStages to keep track of history of lifecycle of a DataTransfer (#163) ([filecoin-project/go-data-transfer#163](https://github.com/filecoin-project/go-data-transfer/pull/163)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Anton Evangelatov | 2 | +1360/-39 | 16 | +| Aarsh Shah | 1 | +42/-0 | 2 | +| dirkmc | 3 | +32/-3 | 5 | +| Dirk McCormick | 1 | +11/-0 | 1 | + +# go-fil-markets v1.2.3 + +Generating Changelog for github.com/filecoin-project/go-fil-markets v1.2.2..cf830ee0459221d4ba7e91f105a0f19b6d5a453e +- github.com/filecoin-project/go-fil-markets: + - fix: process payment request from provider while initiating payment channel (#520) 
([filecoin-project/go-fil-markets#520](https://github.com/filecoin-project/go-fil-markets/pull/520)) + - fix: remove LocatePieceForDealWithinSector (no longer used) (#518) ([filecoin-project/go-fil-markets#518](https://github.com/filecoin-project/go-fil-markets/pull/518)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 3 | +66/-60 | 8 | + +# go-fil-markets v1.2.2 + +- github.com/filecoin-project/go-fil-markets: + - feat: update to go-data-transfer v1.4.0 (#514) ([filecoin-project/go-fil-markets#514](https://github.com/filecoin-project/go-fil-markets/pull/514)) +- github.com/filecoin-project/go-data-transfer (v1.3.0 -> v1.4.0): + - feat: add config options to enable / disable push or pull monitoring individually (#174) ([filecoin-project/go-data-transfer#174](https://github.com/filecoin-project/go-data-transfer/pull/174)) + - fix: ensure channel monitor shuts down when transfer complete (#171) ([filecoin-project/go-data-transfer#171](https://github.com/filecoin-project/go-data-transfer/pull/171)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 5 | +65/-17 | 9 | +| Dirk McCormick | 1 | +12/-0 | 1 | + +# go-fil-markets v1.2.1 + +- github.com/filecoin-project/go-fil-markets: + - feat: update tests for go-data-transfer 1.3.0 (#510) ([filecoin-project/go-fil-markets#510](https://github.com/filecoin-project/go-fil-markets/pull/510)) +- github.com/filecoin-project/go-data-transfer (v1.2.9 -> v1.3.0): + - feat: use random number instead of incrementing counter for transfer ID (#169) ([filecoin-project/go-data-transfer#169](https://github.com/filecoin-project/go-data-transfer/pull/169)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 4 | +185/-95 | 21 | +| Dirk McCormick | 1 | +11/-0 | 1 | + +# go-fil-markets v1.2.0 
+ +- github.com/filecoin-project/go-fil-markets: + - feat: update to go-data-transfer v1.2.9 (#508) (#504) ([filecoin-project/go-fil-markets#504](https://github.com/filecoin-project/go-fil-markets/pull/504)) + - always try to return some message to the client (#498) ([filecoin-project/go-fil-markets#498](https://github.com/filecoin-project/go-fil-markets/pull/498)) + - fix: close the reader after unsealing into blockstore (#507) ([filecoin-project/go-fil-markets#507](https://github.com/filecoin-project/go-fil-markets/pull/507)) + - fix: disallow concurrent retrieval deals for same peer/cid (#493) ([filecoin-project/go-fil-markets#493](https://github.com/filecoin-project/go-fil-markets/pull/493)) + - make deal state channel id nilable (#490) ([filecoin-project/go-fil-markets#490](https://github.com/filecoin-project/go-fil-markets/pull/490)) + - fix: add funds to payment channel before allocating lane (#495) ([filecoin-project/go-fil-markets#495](https://github.com/filecoin-project/go-fil-markets/pull/495)) +- github.com/filecoin-project/go-data-transfer (v1.2.7 -> v1.2.9): + - fix: log line in pull data channel monitor (#165) ([filecoin-project/go-data-transfer#165](https://github.com/filecoin-project/go-data-transfer/pull/165)) + - feat: better reconnect behaviour (#162) ([filecoin-project/go-data-transfer#162](https://github.com/filecoin-project/go-data-transfer/pull/162)) + - Improve push channel to detect when not all data has been received (#157) ([filecoin-project/go-data-transfer#157](https://github.com/filecoin-project/go-data-transfer/pull/157)) + - fix: flaky TestSimulatedRetrievalFlow (#159) ([filecoin-project/go-data-transfer#159](https://github.com/filecoin-project/go-data-transfer/pull/159)) + - feat: better logging (#155) ([filecoin-project/go-data-transfer#155](https://github.com/filecoin-project/go-data-transfer/pull/155)) + - fix: add missing event names (#148) 
([filecoin-project/go-data-transfer#148](https://github.com/filecoin-project/go-data-transfer/pull/148)) + - enable codecov. (#146) ([filecoin-project/go-data-transfer#146](https://github.com/filecoin-project/go-data-transfer/pull/146)) + - Better error message on complete (#145) ([filecoin-project/go-data-transfer#145](https://github.com/filecoin-project/go-data-transfer/pull/145)) + - test: check total blocks sent when theres a restart (#140) ([filecoin-project/go-data-transfer#140](https://github.com/filecoin-project/go-data-transfer/pull/140)) + - feat(deps): update to go-graphsync v0.6.0 (#139) ([filecoin-project/go-data-transfer#139](https://github.com/filecoin-project/go-data-transfer/pull/139)) +- github.com/ipfs/go-graphsync (v0.5.2 -> v0.6.0): + - docs(CHANGELOG): revise for 0.6.0 + - docs(CHANGELOG): update for 0.6.0 release + - move block allocation into message queue (#140) ([ipfs/go-graphsync#140](https://github.com/ipfs/go-graphsync/pull/140)) + - Response Assembler Refactor (#138) ([ipfs/go-graphsync#138](https://github.com/ipfs/go-graphsync/pull/138)) + - Add error listener on receiver (#136) ([ipfs/go-graphsync#136](https://github.com/ipfs/go-graphsync/pull/136)) + - Run testplan on in CI (#137) ([ipfs/go-graphsync#137](https://github.com/ipfs/go-graphsync/pull/137)) + - fix(responsemanager): fix network error propogation (#133) ([ipfs/go-graphsync#133](https://github.com/ipfs/go-graphsync/pull/133)) + - testground test for graphsync (#132) ([ipfs/go-graphsync#132](https://github.com/ipfs/go-graphsync/pull/132)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 17 | +3973/-1000 | 88 | +| Alex Cruikshank | 4 | +3135/-1785 | 46 | +| Hannah Howard | 4 | +671/-386 | 28 | +| Whyrusleeping | 1 | +33/-22 | 1 | +| hannahhoward | 2 | +38/-15 | 4 | +| Dirk McCormick | 1 | +19/-0 | 1 | +| raulk | 1 | +2/-2 | 1 | + +# go-fil-markets v1.1.9 + +- 
github.com/filecoin-project/go-fil-markets: + - Update cbor-gen to ingore unknown map fields (#492) ([filecoin-project/go-fil-markets#492](https://github.com/filecoin-project/go-fil-markets/pull/492)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Łukasz Magiera | 1 | +111/-33 | 13 | +| dirkmc | 1 | +11/-0 | 1 | +| Whyrusleeping | 1 | +3/-1 | 1 | +| Dirk McCormick | 1 | +1/-3 | 1 | + +# go-fil-markets v1.1.8 + +- github.com/filecoin-project/go-fil-markets: + - feat: add RawBlockSize to DataRef (#487) ([filecoin-project/go-fil-markets#487](https://github.com/filecoin-project/go-fil-markets/pull/487)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +47/-3 | 3 | + +# go-fil-markets v1.1.7 + +- github.com/filecoin-project/go-fil-markets: + - Better storage fsm error handling (#484) ([filecoin-project/go-fil-markets#484](https://github.com/filecoin-project/go-fil-markets/pull/484)) + - release: v1.1.6 (#485) ([filecoin-project/go-fil-markets#485](https://github.com/filecoin-project/go-fil-markets/pull/485)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +91/-20 | 3 | + +# go-fil-markets v1.1.6 + +- github.com/filecoin-project/go-fil-markets: + - fix: move publish deals msg response parsing from markets into lotus (#475) ([filecoin-project/go-fil-markets#475](https://github.com/filecoin-project/go-fil-markets/pull/475)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +60/-22 | 5 | + +# go-fil-markets v1.1.5 + +- github.com/filecoin-project/go-fil-markets: + - retrieval: skip payment channel creation if total price is zero (#480) 
([filecoin-project/go-fil-markets#480](https://github.com/filecoin-project/go-fil-markets/pull/480)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 1 | +36/-7 | 8 | + +# go-fil-markets v1.1.3 + +- github.com/filecoin-project/go-fil-markets: + - fix: handle retrieval deals with zero price per byte (#477) ([filecoin-project/go-fil-markets#477](https://github.com/filecoin-project/go-fil-markets/pull/477)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +144/-28 | 13 | + +# go-fil-markets v1.1.2 + +Generating Changelog for github.com/filecoin-project/go-fil-markets v1.1.1..d43e9447747b51824c9628e7cff5f2d2c97ae253 +- github.com/filecoin-project/go-fil-markets: + - feat: update to go-data-transfer v1.2.7 (#472) ([filecoin-project/go-fil-markets#472](https://github.com/filecoin-project/go-fil-markets/pull/472)) +- github.com/filecoin-project/go-data-transfer (v1.2.5 -> v1.2.7): + - feat: configurable send message timeouts (#136) ([filecoin-project/go-data-transfer#136](https://github.com/filecoin-project/go-data-transfer/pull/136)) + - log request / response events (#137) ([filecoin-project/go-data-transfer#137](https://github.com/filecoin-project/go-data-transfer/pull/137)) + - fix: dont complete transfer because graphsync request was cancelled (#134) ([filecoin-project/go-data-transfer#134](https://github.com/filecoin-project/go-data-transfer/pull/134)) + - feat: better push channel monitor logging (#133) ([filecoin-project/go-data-transfer#133](https://github.com/filecoin-project/go-data-transfer/pull/133)) + - add logging to push channel monitor (#131) ([filecoin-project/go-data-transfer#131](https://github.com/filecoin-project/go-data-transfer/pull/131)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 9 
| +216/-72 | 17 | +| Dirk McCormick | 1 | +17/-2 | 1 | + +# go-fil-markets v1.1.1 + +- github.com/filecoin-project/go-fil-markets: + - feat: go-data-transfer v1.2.5 (with restart limit) (#470) ([filecoin-project/go-fil-markets#470](https://github.com/filecoin-project/go-fil-markets/pull/470)) +- github.com/filecoin-project/go-data-transfer (v1.2.4 -> v1.2.5): + - feat: limit consecutive restarts with no data transfer (#129) ([filecoin-project/go-data-transfer#129](https://github.com/filecoin-project/go-data-transfer/pull/129)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 4 | +212/-82 | 9 | +| Dirk McCormick | 1 | +13/-0 | 1 | + +# go-fil-markets v1.1.0 + +Better retry config + +### Changelog +- github.com/filecoin-project/go-fil-markets: + - feat: retry config - separate max open attempts from backoff factor (#467) ([filecoin-project/go-fil-markets#467](https://github.com/filecoin-project/go-fil-markets/pull/467)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +236/-113 | 12 | + +# go-fil-markets v1.0.11 + +When the miner restarts it should not dial the client, the client will automatically dial the miner. 
+ +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - on restart miner shouldn't dial client (#463) ([filecoin-project/go-fil-markets#463](https://github.com/filecoin-project/go-fil-markets/pull/463)) +- github.com/filecoin-project/go-data-transfer (v1.2.3 -> v1.2.4): + - Automatically restart push channel (#127) ([filecoin-project/go-data-transfer#127](https://github.com/filecoin-project/go-data-transfer/pull/127)) +- github.com/ipfs/go-graphsync (v0.5.0 -> v0.5.2): + - RegisterNetworkErrorListener should fire when there's an error connecting to the peer (#127) ([ipfs/go-graphsync#127](https://github.com/ipfs/go-graphsync/pull/127)) + - Permit multiple data subscriptions per original topic (#128) ([ipfs/go-graphsync#128](https://github.com/ipfs/go-graphsync/pull/128)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 6 | +1491/-282 | 42 | +| Alex Cruikshank | 1 | +188/-110 | 12 | +| hannahhoward | 1 | +30/-14 | 8 | +| Hannah Howard | 1 | +23/-6 | 3 | +| Dirk McCormick | 1 | +19/-0 | 1 | + +# go-fil-markets v1.0.10 + +### Changelog + +Generating Changelog for github.com/filecoin-project/go-fil-markets v1.0.9..73534649f61b89d2a332b0939e19aa440a9dc6dc +- github.com/filecoin-project/go-fil-markets: + - feat: update to go-data-transfer v1.2.3 (#465) ([filecoin-project/go-fil-markets#465](https://github.com/filecoin-project/go-fil-markets/pull/465)) + - Normalize final states for cancelled retrieval deals (#464) ([filecoin-project/go-fil-markets#464](https://github.com/filecoin-project/go-fil-markets/pull/464)) +- github.com/filecoin-project/go-data-transfer (v1.2.2 -> v1.2.3): + - Better retry config (#124) ([filecoin-project/go-data-transfer#124](https://github.com/filecoin-project/go-data-transfer/pull/124)) + - feat: expose channel state on Manager interface (#125) 
([filecoin-project/go-data-transfer#125](https://github.com/filecoin-project/go-data-transfer/pull/125)) + - Fix typo, wrap correct FSM error (#123) ([filecoin-project/go-data-transfer#123](https://github.com/filecoin-project/go-data-transfer/pull/123)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 5 | +188/-10 | 10 | +| Ingar Shu | 2 | +59/-26 | 16 | +| Dirk McCormick | 1 | +16/-0 | 1 | + +# go-fil-markets v1.0.9 + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - fix: clear deal message when deal accepted (#461) ([filecoin-project/go-fil-markets#461](https://github.com/filecoin-project/go-fil-markets/pull/461)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 2 | +24/-0 | 2 | + + +# go-fil-markets v1.0.8 + +### Changelog + +- feat: go-data-transfer 1.2.2 (#459) ([filecoin-project/go-fil-markets#459](https://github.com/filecoin-project/go-fil-markets/pull/459)) +- Cancel transfer should cause Storage deal to fail (#457) ([filecoin-project/go-fil-markets#457](https://github.com/filecoin-project/go-fil-markets/pull/457)) +- Give more visibility (to the client) while waiting for deal acceptance (#458) ([filecoin-project/go-fil-markets#458](https://github.com/filecoin-project/go-fil-markets/pull/458)) +- docs(CHANGELOG): update for v1.0.7 release ([filecoin-project/go-fil-markets#456](https://github.com/filecoin-project/go-fil-markets/pull/456)) +- github.com/filecoin-project/go-data-transfer (v1.0.1 -> v1.2.2): + - fix(graphsync): fix UseStore for restarts (#115) ([filecoin-project/go-data-transfer#115](https://github.com/filecoin-project/go-data-transfer/pull/115)) + - Fire cancel locally even if remote cancel fails (#120) ([filecoin-project/go-data-transfer#120](https://github.com/filecoin-project/go-data-transfer/pull/120)) + - fix: respect context when opening stream 
(#119) ([filecoin-project/go-data-transfer#119](https://github.com/filecoin-project/go-data-transfer/pull/119)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +1307/-213 | 22 | +| dirkmc | 5 | +395/-82 | 23 | +| Steven Allen | 4 | +419/-37 | 8 | +| Ingar Shu | 1 | +10/-5 | 3 | +| Dirk McCormick | 1 | +13/-0 | 1 | + +# go-fil-markets v1.0.7 + +Separate pre-commit & post-commit states for deals + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update for v1.0.6 ([filecoin-project/go-fil-markets#454](https://github.com/filecoin-project/go-fil-markets/pull/454)) + - Add StorageDealAwaitingPreCommit state (#453) ([filecoin-project/go-fil-markets#453](https://github.com/filecoin-project/go-fil-markets/pull/453)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 1 | +349/-37 | 19 | + +# go-fil-markets v1.0.6 + +Complete removal of temp file usage + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Integrate go-commp-utils (#452) ([filecoin-project/go-fil-markets#452](https://github.com/filecoin-project/go-fil-markets/pull/452)) + - release: v1.0.5 ([filecoin-project/go-fil-markets#451](https://github.com/filecoin-project/go-fil-markets/pull/451)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| dirkmc | 1 | +51/-854 | 21 | + +# go-fil-markets v1.0.5 + +Minor bug fix release + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Remove temp files (#449) ([filecoin-project/go-fil-markets#449](https://github.com/filecoin-project/go-fil-markets/pull/449)) + - fix: fail fast in tests that use a waitgroup Wait() (#450) ([filecoin-project/go-fil-markets#450](https://github.com/filecoin-project/go-fil-markets/pull/450)) + - Make it possible to 
configure the proof type at runtime. ([filecoin-project/go-fil-markets#448](https://github.com/filecoin-project/go-fil-markets/pull/448)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +292/-393 | 17 | +| Steven Allen | 1 | +21/-11 | 6 | +| dirkmc | 1 | +21/-7 | 1 | + +# go-fil-markets v1.0.4 + +Integrate new lotus FundManager + +### Changelog + +- [Integrate new FundManager](https://github.com/filecoin-project/go-fil-markets/pull/445) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Dirk McCormick | 1 | +75/-311 | 17 | + +# go-fil-markets v1.0.1 + +Minor bug fixes and interface change for OnDealSectorCommitted + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Pass PublishCID (#441) ([filecoin-project/go-fil-markets#441](https://github.com/filecoin-project/go-fil-markets/pull/441)) + - docs(CHANGELOG): update for 1.0.0 release ([filecoin-project/go-fil-markets#443](https://github.com/filecoin-project/go-fil-markets/pull/443)) + - fix(providerstates): save paths on commP mismatch (#440) ([filecoin-project/go-fil-markets#440](https://github.com/filecoin-project/go-fil-markets/pull/440)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +14/-9 | 7 | + +# go-fil-markets v1.0.0 + +Cut 1.0.0 release with a few fixes + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update for 0.9.1 ([filecoin-project/go-fil-markets#439](https://github.com/filecoin-project/go-fil-markets/pull/439)) + - If available, SetAsk will use a previously-stored ask for default values for Min/Max piece sizes (#438) ([filecoin-project/go-fil-markets#438](https://github.com/filecoin-project/go-fil-markets/pull/438)) + - Create SECURITY.md (#436) 
([filecoin-project/go-fil-markets#436](https://github.com/filecoin-project/go-fil-markets/pull/436)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Ingar Shu | 1 | +26/-13 | 2 | +| David Dias | 1 | +9/-0 | 1 | + +# go-fil-markets v0.9.1 + +Critical bug fix for v0.9.0 + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - fix(clientstates): fix panil on transfer id not present (#437) ([filecoin-project/go-fil-markets#437](https://github.com/filecoin-project/go-fil-markets/pull/437)) + - docs(CHANGELOG): update for 0.9.0 release ([filecoin-project/go-fil-markets#434](https://github.com/filecoin-project/go-fil-markets/pull/434)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +4/-0 | 1 | + +# go-fil-markets v0.9.0 + +Major update with data transfer protocol v1.1 with support for restarts +Also restarts data transfer automatically for storage market side + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Record data transfer errors (#432) ([filecoin-project/go-fil-markets#432](https://github.com/filecoin-project/go-fil-markets/pull/432)) + - set ask correct behaviour (#433) ([filecoin-project/go-fil-markets#433](https://github.com/filecoin-project/go-fil-markets/pull/433)) + - Resume Storage Market Data Transfer (#430) ([filecoin-project/go-fil-markets#430](https://github.com/filecoin-project/go-fil-markets/pull/430)) + - Exponential backoff, rebased (#431) ([filecoin-project/go-fil-markets#431](https://github.com/filecoin-project/go-fil-markets/pull/431)) + - Providers should reject deals with invalid durations (#427) ([filecoin-project/go-fil-markets#427](https://github.com/filecoin-project/go-fil-markets/pull/427)) + - docs(CHANGELOG): update for 0.7.1 
([filecoin-project/go-fil-markets#428](https://github.com/filecoin-project/go-fil-markets/pull/428)) +- github.com/filecoin-project/go-data-transfer (v0.6.7 -> v0.9.0): + - docs(CHANGELOG): update for 0.9.0 + - Message compatibility on graphsync (#102) ([filecoin-project/go-data-transfer#102](https://github.com/filecoin-project/go-data-transfer/pull/102)) + - Handle network errors/stalls (#101) ([filecoin-project/go-data-transfer#101](https://github.com/filecoin-project/go-data-transfer/pull/101)) + - Resume Data Transfer (#100) ([filecoin-project/go-data-transfer#100](https://github.com/filecoin-project/go-data-transfer/pull/100)) + - docs(CHANGELOG): update for 0.6.7 release ([filecoin-project/go-data-transfer#98](https://github.com/filecoin-project/go-data-transfer/pull/98)) +- github.com/ipfs/go-graphsync (v0.2.1 -> v0.3.0): + - feat(CHANGELOG): update for 0.3.0 + - docs(CHANGELOG): update for 0.2.1 ([ipfs/go-graphsync#103](https://github.com/ipfs/go-graphsync/pull/103)) + - Track actual network operations in a response (#102) ([ipfs/go-graphsync#102](https://github.com/ipfs/go-graphsync/pull/102)) + - feat(responsecache): prune blocks more intelligently (#101) ([ipfs/go-graphsync#101](https://github.com/ipfs/go-graphsync/pull/101)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aarsh Shah | 3 | +10588/-2517 | 105 | +| Hannah Howard | 6 | +2463/-1036 | 68 | +| hannahhoward | 2 | +50/-3 | 5 | +| Aayush Rajasekaran | 1 | +23/-20 | 4 | + +# go-fil-markets v0.7.1 + +minor bug fixes and optimizations + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update for 0.7.0 ([filecoin-project/go-fil-markets#426](https://github.com/filecoin-project/go-fil-markets/pull/426)) + - early return in retrieval validation to prevent unnecessary disk access (#425) ([filecoin-project/go-fil-markets#425](https://github.com/filecoin-project/go-fil-markets/pull/425)) + 
- Add option for specific stopping event for providers in TestRestartClient (#424) ([filecoin-project/go-fil-markets#424](https://github.com/filecoin-project/go-fil-markets/pull/424)) + - Handle re-sent deal proposals (#423) ([filecoin-project/go-fil-markets#423](https://github.com/filecoin-project/go-fil-markets/pull/423)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Ingar Shu | 2 | +145/-13 | 4 | +| Hannah Howard | 1 | +16/-14 | 2 | + + +# go-fil-markets v0.7.0 + +Switch to cbor-map encodings across the module, with migrations + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Switch to cbor map encoding for storage market (#420) ([filecoin-project/go-fil-markets#420](https://github.com/filecoin-project/go-fil-markets/pull/420)) + - Retrieval Migration to CBOR Map (#418) ([filecoin-project/go-fil-markets#418](https://github.com/filecoin-project/go-fil-markets/pull/418)) + - Move PieceStore To Map Encodings (#415) ([filecoin-project/go-fil-markets#415](https://github.com/filecoin-project/go-fil-markets/pull/415)) + - docs(CHANGELOG): update for 0.6.3 ([filecoin-project/go-fil-markets#421](https://github.com/filecoin-project/go-fil-markets/pull/421)) +- github.com/filecoin-project/go-data-transfer (v0.6.6 -> v0.6.7-0.20200929095758-d6c2eaff0694): + - Feat/cleanup errors (#90) ([filecoin-project/go-data-transfer#90](https://github.com/filecoin-project/go-data-transfer/pull/90)) + - Disambiguate whether a revalidator recognized a request when checking for a need to revalidate (#87) ([filecoin-project/go-data-transfer#87](https://github.com/filecoin-project/go-data-transfer/pull/87)) + - docs(CHANGELOG): update for 0.6.6 ([filecoin-project/go-data-transfer#89](https://github.com/filecoin-project/go-data-transfer/pull/89)) +- github.com/filecoin-project/go-statemachine (v0.0.0-20200813232949-df9b130df370 -> v0.0.0-20200925024713-05bd7c71fbfe): + - feat(fsm): switch 
to interface storedstate + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 5 | +13849/-3533 | 110 | +| hannahhoward | 1 | +13/-2 | 2 | + +# go-fil-markets v0.6.3 + +dependency update with fix for graphsync + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - feat(deps): update data transfer 0.6.6 (#417) ([filecoin-project/go-fil-markets#417](https://github.com/filecoin-project/go-fil-markets/pull/417)) + - docs(CHANGELOG): update for 0.6.2 ([filecoin-project/go-fil-markets#416](https://github.com/filecoin-project/go-fil-markets/pull/416)) +- github.com/filecoin-project/go-data-transfer (v0.6.5 -> v0.6.6): + - docs(CHANGELOG): update for 0.6.6 + - feat(deps): update graphsync (#86) ([filecoin-project/go-data-transfer#86](https://github.com/filecoin-project/go-data-transfer/pull/86)) + - docs(CHANGELOG): updates for 0.6.5 ([filecoin-project/go-data-transfer#85](https://github.com/filecoin-project/go-data-transfer/pull/85)) +- github.com/ipfs/go-graphsync (v0.2.0 -> v0.2.1): + - docs(CHANGELOG): update for 0.2.1 + - Release/0.2.0 ([ipfs/go-graphsync#99](https://github.com/ipfs/go-graphsync/pull/99)) + - fix(metadata): fix cbor-gen (#98) ([ipfs/go-graphsync#98](https://github.com/ipfs/go-graphsync/pull/98)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| hannahhoward | 2 | +104/-68 | 2 | +| Hannah Howard | 3 | +21/-25 | 7 | + +# go-fil-markets v0.6.2 + +security fixes and dependency updates + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - feat(deps): update data transfer and graphsync (#413) ([filecoin-project/go-fil-markets#413](https://github.com/filecoin-project/go-fil-markets/pull/413)) + - Additional validations for deal proposals (#409) ([filecoin-project/go-fil-markets#409](https://github.com/filecoin-project/go-fil-markets/pull/409)) + - 
Release/v0.6.1 ([filecoin-project/go-fil-markets#412](https://github.com/filecoin-project/go-fil-markets/pull/412)) + - feat(storagemarket): remove passthrough methods (#404) ([filecoin-project/go-fil-markets#404](https://github.com/filecoin-project/go-fil-markets/pull/404)) + - - Consider ClientCollateral when validating deal proposal (#408) ([filecoin-project/go-fil-markets#408](https://github.com/filecoin-project/go-fil-markets/pull/408)) + - Persist retrieval ask on disk (#410) ([filecoin-project/go-fil-markets#410](https://github.com/filecoin-project/go-fil-markets/pull/410)) + - Tidy up comments (#405) ([filecoin-project/go-fil-markets#405](https://github.com/filecoin-project/go-fil-markets/pull/405)) +- github.com/filecoin-project/go-data-transfer (v0.6.4 -> v0.6.5): + - docs(CHANGELOG): updates for 0.6.5 + - feat(deps): update graphsync 0.2.0 (#83) ([filecoin-project/go-data-transfer#83](https://github.com/filecoin-project/go-data-transfer/pull/83)) + - docs(CHANGELOG): update for 0.6.4 ([filecoin-project/go-data-transfer#82](https://github.com/filecoin-project/go-data-transfer/pull/82)) +- github.com/hannahhoward/cbor-gen-for (v0.0.0-20200723175505-5892b522820a -> v0.0.0-20200817222906-ea96cece81f1): + - add flag to select map encoding ([hannahhoward/cbor-gen-for#1](https://github.com/hannahhoward/cbor-gen-for/pull/1)) +- github.com/ipfs/go-graphsync (v0.1.2 -> v0.2.0): + - docs(CHANGELOG): update for 0.2.0 + - style(imports): fix imports + - fix(selectorvalidator): memory optimization (#97) ([ipfs/go-graphsync#97](https://github.com/ipfs/go-graphsync/pull/97)) + - Update go-ipld-prime@v0.5.0 (#92) ([ipfs/go-graphsync#92](https://github.com/ipfs/go-graphsync/pull/92)) + - refactor(metadata): use cbor-gen encoding (#96) ([ipfs/go-graphsync#96](https://github.com/ipfs/go-graphsync/pull/96)) + - Release/v0.1.2 ([ipfs/go-graphsync#95](https://github.com/ipfs/go-graphsync/pull/95)) + - Return Request context cancelled error (#93) 
([ipfs/go-graphsync#93](https://github.com/ipfs/go-graphsync/pull/93)) + - feat(benchmarks): add p2p stress test (#91) ([ipfs/go-graphsync#91](https://github.com/ipfs/go-graphsync/pull/91)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Eric Myhre | 1 | +2919/-121 | 39 | +| Hannah Howard | 6 | +498/-346 | 49 | +| hannahhoward | 3 | +248/-189 | 9 | +| Ingar Shu | 3 | +357/-38 | 12 | +| whyrusleeping | 1 | +31/-18 | 2 | +| Aarsh Shah | 1 | +27/-1 | 3 | +| Kirk Baird | 1 | +11/-11 | 3 | + +# go-fil-markets 0.6.1 + +Minor fixes around network timeouts, restarts, chain issues + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - feat(deps): update data transfer 0.6.4 + - feat(network): add config options (#403) ([filecoin-project/go-fil-markets#403](https://github.com/filecoin-project/go-fil-markets/pull/403)) + - Test Restart for ClientEventDealPublished AND ClientEventDealActivated (#399) ([filecoin-project/go-fil-markets#399](https://github.com/filecoin-project/go-fil-markets/pull/399)) + - increase timeout (#402) ([filecoin-project/go-fil-markets#402](https://github.com/filecoin-project/go-fil-markets/pull/402)) + - - Change signature of callback for WaitForMessage() to take a "final Cid" parameter (#400) ([filecoin-project/go-fil-markets#400](https://github.com/filecoin-project/go-fil-markets/pull/400)) + - feat(clientutils): encode cid directly (#398) ([filecoin-project/go-fil-markets#398](https://github.com/filecoin-project/go-fil-markets/pull/398)) + - docs(CHANGELOG): update for 0.6.0 release ([filecoin-project/go-fil-markets#397](https://github.com/filecoin-project/go-fil-markets/pull/397)) +- github.com/filecoin-project/go-data-transfer (v0.6.3 -> v0.6.4): + - docs(CHANGELOG): update for 0.6.4 + - Ensure valid messages are returned from FromNet() (#74) ([filecoin-project/go-data-transfer#74](https://github.com/filecoin-project/go-data-transfer/pull/74)) + - 
Release/v0.6.3 ([filecoin-project/go-data-transfer#70](https://github.com/filecoin-project/go-data-transfer/pull/70)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +94/-68 | 11 | +| Aarsh Shah | 2 | +108/-39 | 5 | +| Ingar Shu | 2 | +45/-14 | 10 | +| hannahhoward | 2 | +19/-3 | 3 | + +# go-fil-markets 0.6.0 + +Major release that updates to specs-actors v0.9.7, which also introduces the new `go-state-types` dependency. This release also introduces non-blocking retrieval. + +### Changelog + + - Update to specs-actors v0.9.7 (#396) ([filecoin-project/go-fil-markets#396](https://github.com/filecoin-project/go-fil-markets/pull/396)) + - Use shared types for exitcode and crypto (#395) ([filecoin-project/go-fil-markets#395](https://github.com/filecoin-project/go-fil-markets/pull/395)) + - Consume go-state-types (#394) ([filecoin-project/go-fil-markets#394](https://github.com/filecoin-project/go-fil-markets/pull/394)) + - Convert label JSON to CBOR (#393) ([filecoin-project/go-fil-markets#393](https://github.com/filecoin-project/go-fil-markets/pull/393)) + - Non blocking retrieval (#392) ([filecoin-project/go-fil-markets#392](https://github.com/filecoin-project/go-fil-markets/pull/392)) + - fix(storagemarket): fix test hang (#391) ([filecoin-project/go-fil-markets#391](https://github.com/filecoin-project/go-fil-markets/pull/391)) + - docs(CHANGELOG): update for 0.5.9 release ([filecoin-project/go-fil-markets#390](https://github.com/filecoin-project/go-fil-markets/pull/390)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 3 | +678/-289 | 25 | +| ZenGround0 | 2 | +226/-187 | 78 | +| Aayush Rajasekaran | 1 | +3/-3 | 2 | + +# go-fil-markets 0.5.9 + +Minor stability release to handle connection drops more gracefully and fix a retrieval bug + +### Changelog + + - 
github.com/filecoin-project/go-fil-markets: + - Retry with exponential backoff for stream opening (#382) ([filecoin-project/go-fil-markets#382](https://github.com/filecoin-project/go-fil-markets/pull/382)) + - fix(retrievalmarket): last block on interval works (#388) ([filecoin-project/go-fil-markets#388](https://github.com/filecoin-project/go-fil-markets/pull/388)) + - Release/v0.5.8 ([filecoin-project/go-fil-markets#386](https://github.com/filecoin-project/go-fil-markets/pull/386)) + - docs: Add disclaimer to guide people to report issues in Lotus (#384) ([filecoin-project/go-fil-markets#384](https://github.com/filecoin-project/go-fil-markets/pull/384)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +161/-25 | 17 | +| Aarsh Shah | 1 | +132/-24 | 9 | +| David Dias | 1 | +5/-0 | 1 | + +# go-fil-markets 0.5.8 + +Minor bug fix release with various small improvements, and memory fixes for Graphsync + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - fix(deps): update data transfer + - fix restart issue with deal transferring (#381) ([filecoin-project/go-fil-markets#381](https://github.com/filecoin-project/go-fil-markets/pull/381)) + - Plumb deal.PublishCid in HandoffDeal (#383) ([filecoin-project/go-fil-markets#383](https://github.com/filecoin-project/go-fil-markets/pull/383)) + - docs(CHANGELOG): update for v0.5.7 ([filecoin-project/go-fil-markets#376](https://github.com/filecoin-project/go-fil-markets/pull/376)) +- github.com/filecoin-project/go-data-transfer (v0.6.2 -> v0.6.3): + - docs(CHANGELOG): update for 0.6.3 + - fix(deps): update graphsync, small cleanup + - Stop data transfer correctly and some minor cleanp (#69) ([filecoin-project/go-data-transfer#69](https://github.com/filecoin-project/go-data-transfer/pull/69)) + - docs(CHANGELOG): update for 0.6.2 release 
([filecoin-project/go-data-transfer#68](https://github.com/filecoin-project/go-data-transfer/pull/68)) +- github.com/ipfs/go-graphsync (v0.1.1 -> v0.1.2): + - fix(asyncloader): remove send on close channel + - docs(CHANGELOG): update for 0.1.2 release + - Benchmark framework + First memory fixes (#89) ([ipfs/go-graphsync#89](https://github.com/ipfs/go-graphsync/pull/89)) + - docs(CHANGELOG): update for v0.1.1 ([ipfs/go-graphsync#85](https://github.com/ipfs/go-graphsync/pull/85)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +1055/-39 | 17 | +| Aarsh Shah | 2 | +125/-120 | 13 | +| hannahhoward | 5 | +120/-44 | 16 | +| Łukasz Magiera | 1 | +1/-0 | 1 | + +# go-fil-markets 0.5.7 + +Minor release with bug fix for retrieval markets + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - fix(retrievalmarket): fix unseal error hang (#375) ([filecoin-project/go-fil-markets#375](https://github.com/filecoin-project/go-fil-markets/pull/375)) + - add deal creation time to deals (#373) ([filecoin-project/go-fil-markets#373](https://github.com/filecoin-project/go-fil-markets/pull/373)) +- github.com/filecoin-project/go-data-transfer (v0.6.1 -> v0.6.2): + - docs(CHANGELOG): update for 0.6.2 release + - Fix Pull Request Cancelling (#67) ([filecoin-project/go-data-transfer#67](https://github.com/filecoin-project/go-data-transfer/pull/67)) + - docs(CHANGELOG): update for 0.6.1 ([filecoin-project/go-data-transfer#66](https://github.com/filecoin-project/go-data-transfer/pull/66)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +325/-38 | 15 | +| Whyrusleeping | 1 | +61/-22 | 11 | +| hannahhoward | 1 | +16/-0 | 1 | + +# go-fil-markets 0.5.6 + +Minor release w/ various bug fixes + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): 
update for v0.5.6 release + - feat(requestvalidation): use getsync in validation (#372) ([filecoin-project/go-fil-markets#372](https://github.com/filecoin-project/go-fil-markets/pull/372)) + - Use collateral from ProposeStorageDealParams (#371) ([filecoin-project/go-fil-markets#371](https://github.com/filecoin-project/go-fil-markets/pull/371)) + - Fix UML for docs-check (#370) ([filecoin-project/go-fil-markets#370](https://github.com/filecoin-project/go-fil-markets/pull/370)) + - docs(CHANGELOG): update for 0.5.5 ([filecoin-project/go-fil-markets#368](https://github.com/filecoin-project/go-fil-markets/pull/368)) +- github.com/filecoin-project/go-statemachine (v0.0.0-20200730031800-c3336614d2a7 -> v0.0.0-20200813232949-df9b130df370): + - fix(uml): make just record predictable (#19) ([filecoin-project/go-statemachine#19](https://github.com/filecoin-project/go-statemachine/pull/19)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 3 | +318/-235 | 19 | +| hannahhoward | 1 | +21/-0 | 1 | +| Aayush Rajasekaran | 1 | +6/-3 | 1 | + +# go-fil-markets v0.5.5 + +Minor release updates dependencies and adds back in peer tagging + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Update data transfer 0.6.1 (#366) ([filecoin-project/go-fil-markets#366](https://github.com/filecoin-project/go-fil-markets/pull/366)) + - Update specs-actors and cbor-gen (#363) ([filecoin-project/go-fil-markets#363](https://github.com/filecoin-project/go-fil-markets/pull/363)) + - Add back connection tagging (#362) ([filecoin-project/go-fil-markets#362](https://github.com/filecoin-project/go-fil-markets/pull/362)) +- github.com/filecoin-project/go-data-transfer (v0.5.3 -> v0.6.1): + - docs(CHANGELOG): update for 0.6.1 + - Update graphsync 0.1.1 (#65) ([filecoin-project/go-data-transfer#65](https://github.com/filecoin-project/go-data-transfer/pull/65)) + - Update changelog for 0.6.0 
release (#64) ([filecoin-project/go-data-transfer#64](https://github.com/filecoin-project/go-data-transfer/pull/64)) + - Update cbor-gen (#63) ([filecoin-project/go-data-transfer#63](https://github.com/filecoin-project/go-data-transfer/pull/63)) + - Release/v0.5.3 ([filecoin-project/go-data-transfer#62](https://github.com/filecoin-project/go-data-transfer/pull/62)) +- github.com/ipfs/go-graphsync (v0.1.0 -> v0.1.1): + - docs(CHANGELOG): update for v0.1.1 + - docs(CHANGELOG): update for v0.1.0 release ([ipfs/go-graphsync#84](https://github.com/ipfs/go-graphsync/pull/84)) + - Dedup by key extension (#83) ([ipfs/go-graphsync#83](https://github.com/ipfs/go-graphsync/pull/83)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 4 | +590/-154 | 33 | +| Steven Allen | 3 | +161/-176 | 12 | +| hannahhoward | 2 | +38/-1 | 3 | + +# go-fil-markets 0.5.4 + +Minor release to bug fix some multi-addr issues + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Properly add multiaddrs to avoid dialing issues (#356) ([filecoin-project/go-fil-markets#356](https://github.com/filecoin-project/go-fil-markets/pull/356)) + - docs(CHANGELOG): update for 0.5.3 release ([filecoin-project/go-fil-markets#355](https://github.com/filecoin-project/go-fil-markets/pull/355)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +178/-39 | 16 | + +# go-fil-markets 0.5.3 + +Retrieval before seal and improvements to fund management + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Update multistore (#353) ([filecoin-project/go-fil-markets#353](https://github.com/filecoin-project/go-fil-markets/pull/353)) + - feat(storagemarket): add label field to deals (#349) ([filecoin-project/go-fil-markets#349](https://github.com/filecoin-project/go-fil-markets/pull/349)) + - fix(imports): 
fix imports issue from previous + - retrieval: Drain piece readers (#348) ([filecoin-project/go-fil-markets#348](https://github.com/filecoin-project/go-fil-markets/pull/348)) + - Adding different prices for verified deals (#347) ([filecoin-project/go-fil-markets#347](https://github.com/filecoin-project/go-fil-markets/pull/347)) + - update to latest cbor-gen (#346) ([filecoin-project/go-fil-markets#346](https://github.com/filecoin-project/go-fil-markets/pull/346)) + - fix(deps): update data-transfer + - Update specs-actors, enforce provider deal collateral bounds (#345) ([filecoin-project/go-fil-markets#345](https://github.com/filecoin-project/go-fil-markets/pull/345)) + - Feat/retrieve unsealed (#340) ([filecoin-project/go-fil-markets#340](https://github.com/filecoin-project/go-fil-markets/pull/340)) + - Track deals funding for deals that are being negotiated (#336) ([filecoin-project/go-fil-markets#336](https://github.com/filecoin-project/go-fil-markets/pull/336)) + - docs(CHANGELOG): update for 0.5.2 release ([filecoin-project/go-fil-markets#344](https://github.com/filecoin-project/go-fil-markets/pull/344)) +- github.com/filecoin-project/go-data-transfer (v0.5.1 -> v0.5.3): + - docs(CHANGELOG): update for 0.5.3 release + - fix(deps): update graphsync + - Release infrastructure (#61) ([filecoin-project/go-data-transfer#61](https://github.com/filecoin-project/go-data-transfer/pull/61)) + - Update cbor-gen (#60) ([filecoin-project/go-data-transfer#60](https://github.com/filecoin-project/go-data-transfer/pull/60)) + - fix(deps): update graphsync + - fix(message): add error check to FromNet (#59) ([filecoin-project/go-data-transfer#59](https://github.com/filecoin-project/go-data-transfer/pull/59)) +- github.com/filecoin-project/go-statemachine (v0.0.0-20200714194326-a77c3ae20989 -> v0.0.0-20200730031800-c3336614d2a7): + - fix(fsm): fix test context (#18) ([filecoin-project/go-statemachine#18](https://github.com/filecoin-project/go-statemachine/pull/18)) +- 
github.com/ipfs/go-graphsync (v0.0.6-0.20200721211002-c376cbe14c0a -> v0.1.0): + - docs(CHANGELOG): update for v0.1.0 release + - Release infrastructure (#81) ([ipfs/go-graphsync#81](https://github.com/ipfs/go-graphsync/pull/81)) + - feat(persistenceoptions): add unregister ability (#80) ([ipfs/go-graphsync#80](https://github.com/ipfs/go-graphsync/pull/80)) + - fix(message): regen protobuf code (#79) ([ipfs/go-graphsync#79](https://github.com/ipfs/go-graphsync/pull/79)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 7 | +1710/-533 | 105 | +| Whyrusleeping | 2 | +477/-433 | 35 | +| Ingar Shu | 1 | +545/-20 | 26 | +| Łukasz Magiera | 2 | +189/-176 | 9 | +| Aayush Rajasekaran | 3 | +144/-79 | 21 | +| hannahhoward | 6 | +87/-18 | 10 | + +# go-fil-markets 0.5.2 + +In this release, we move to managing individual, garbage collected stores for each deal. + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Unique Blockstores: RetrievalMarket (#342) ([filecoin-project/go-fil-markets#342](https://github.com/filecoin-project/go-fil-markets/pull/342)) + - Unique blockstores: Storage Market (#341) ([filecoin-project/go-fil-markets#341](https://github.com/filecoin-project/go-fil-markets/pull/341)) + - docs(CHANGLEOG): update for 0.5.1 ([filecoin-project/go-fil-markets#335](https://github.com/filecoin-project/go-fil-markets/pull/335)) +- github.com/filecoin-project/go-data-transfer (v0.5.0 -> v0.5.1): + - Allow custom configuration of transports (#57) ([filecoin-project/go-data-transfer#57](https://github.com/filecoin-project/go-data-transfer/pull/57)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200715204712-ef06b3d32e83 -> v0.0.6-0.20200721211002-c376cbe14c0a): + - feat(persistenceoptions): add unregister ability + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 3 | 
+2378/-1076 | 90 | +| hannahhoward | 1 | +113/-32 | 5 | + +# go-fil-markets 0.5.1 + +Minor improvements and bug fixes, retrieval market now supports unseal price + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - fix(queryresponse): set piece retrieval price correctly (#334) ([filecoin-project/go-fil-markets#334](https://github.com/filecoin-project/go-fil-markets/pull/334)) + - feat(deps): upgrade specs actors to lotus/next sha (#333) ([filecoin-project/go-fil-markets#333](https://github.com/filecoin-project/go-fil-markets/pull/333)) + - Support retrieval UnsealPrice (#325) (#330) ([filecoin-project/go-fil-markets#330](https://github.com/filecoin-project/go-fil-markets/pull/330)) + - Cleanup project imports (#327) ([filecoin-project/go-fil-markets#327](https://github.com/filecoin-project/go-fil-markets/pull/327)) + - Use client address, not default address, to sign proposals (#326) ([filecoin-project/go-fil-markets#326](https://github.com/filecoin-project/go-fil-markets/pull/326)) + - Remove DealAcceptanceBuffer (#322) ([filecoin-project/go-fil-markets#322](https://github.com/filecoin-project/go-fil-markets/pull/322)) + - docs(CHANGELOG): update for 0.5.0 release ([filecoin-project/go-fil-markets#324](https://github.com/filecoin-project/go-fil-markets/pull/324)) +- github.com/hannahhoward/cbor-gen-for (v0.0.0-20191218204337-9ab7b1bcc099 -> v0.0.0-20200723175505-5892b522820a): + - fix(deps): update cbor-gen-to-latest + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 3 | +1363/-723 | 94 | +| Ingar Shu | 1 | +262/-122 | 26 | +| Aayush Rajasekaran | 2 | +13/-75 | 5 | +| hannahhoward | 1 | +25/-32 | 2 | + +# go-fil-markets 0.5.0 + +Major release with rewrite of retrieval to operate on top of data transfer protocol. 
+ +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Retrieval on data transfer (#321) ([filecoin-project/go-fil-markets#321](https://github.com/filecoin-project/go-fil-markets/pull/321)) + - docs(CHANGELOG): update for 0.4.0 ([filecoin-project/go-fil-markets#318](https://github.com/filecoin-project/go-fil-markets/pull/318)) +- github.com/filecoin-project/go-data-transfer (v0.4.0 -> v0.5.0): + - Minor fixes for retrieval on data transfer (#56) ([filecoin-project/go-data-transfer#56](https://github.com/filecoin-project/go-data-transfer/pull/56)) +- github.com/filecoin-project/go-statemachine (v0.0.0-20200703171610-a74a697973b9 -> v0.0.0-20200714194326-a77c3ae20989): + - Just Record Events (#17) ([filecoin-project/go-statemachine#17](https://github.com/filecoin-project/go-statemachine/pull/17)) + - GetSync method (#16) ([filecoin-project/go-statemachine#16](https://github.com/filecoin-project/go-statemachine/pull/16)) + - feat(statemachine): add Has method (#15) ([filecoin-project/go-statemachine#15](https://github.com/filecoin-project/go-statemachine/pull/15)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200708073926-caa872f68b2c -> v0.0.6-0.20200715204712-ef06b3d32e83): + - feat(requestmanager): run response hooks on completed requests (#77) ([ipfs/go-graphsync#77](https://github.com/ipfs/go-graphsync/pull/77)) + - Revert "add extensions on complete (#76)" + - add extensions on complete (#76) ([ipfs/go-graphsync#76](https://github.com/ipfs/go-graphsync/pull/76)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 7 | +3865/-3792 | 96 | +| hannahhoward | 1 | +155/-270 | 9 | + +# go-fil-markets 0.4.0 + +Major release with touchups to deal protocol (does not hold open connections), major rewrite of underlying data transfer library, and features like fast retrieval and verified client support + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + 
- Update to new data transfer (#317) ([filecoin-project/go-fil-markets#317](https://github.com/filecoin-project/go-fil-markets/pull/317)) + - Add NewQueryV1, which has an optional PieceID param (#313) ([filecoin-project/go-fil-markets#313](https://github.com/filecoin-project/go-fil-markets/pull/313)) + - Add PieceCid to RetrievalPeer (#312) ([filecoin-project/go-fil-markets#312](https://github.com/filecoin-project/go-fil-markets/pull/312)) + - add locks around client deals (#315) ([filecoin-project/go-fil-markets#315](https://github.com/filecoin-project/go-fil-markets/pull/315)) + - Merge branch 'release/v0.3.1.1' + - add locks protecting retrieval market maps (#311) ([filecoin-project/go-fil-markets#311](https://github.com/filecoin-project/go-fil-markets/pull/311)) + - Fix/make streams context aware (#308) ([filecoin-project/go-fil-markets#308](https://github.com/filecoin-project/go-fil-markets/pull/308)) + - Reject deals based on verifreg.DataCap for client (#307) ([filecoin-project/go-fil-markets#307](https://github.com/filecoin-project/go-fil-markets/pull/307)) + - fix(storagemarket): run deal restarts in go routine (#309) ([filecoin-project/go-fil-markets#309](https://github.com/filecoin-project/go-fil-markets/pull/309)) + - StorageProvider records Slashed Or Expired Deals (#306) ([filecoin-project/go-fil-markets#306](https://github.com/filecoin-project/go-fil-markets/pull/306)) + - Fast retrieval support (#305) ([filecoin-project/go-fil-markets#305](https://github.com/filecoin-project/go-fil-markets/pull/305)) + - Clients can create verified deals (#304) ([filecoin-project/go-fil-markets#304](https://github.com/filecoin-project/go-fil-markets/pull/304)) + - Use Query Protocol in storage deal negotiation (#297) ([filecoin-project/go-fil-markets#297](https://github.com/filecoin-project/go-fil-markets/pull/297)) +- github.com/filecoin-project/go-data-transfer (v0.3.0 -> v0.4.0): + - The new data transfer (#55) 
([filecoin-project/go-data-transfer#55](https://github.com/filecoin-project/go-data-transfer/pull/55)) + - Actually track progress for send/receive (#53) ([filecoin-project/go-data-transfer#53](https://github.com/filecoin-project/go-data-transfer/pull/53)) +- github.com/filecoin-project/go-statemachine (v0.0.0-20200619205156-c7bf525c06ef -> v0.0.0-20200703171610-a74a697973b9): + - feat(statemachine): add Has method +- github.com/ipfs/go-graphsync (v0.0.6-0.20200504202014-9d5f2c26a103 -> v0.0.6-0.20200708073926-caa872f68b2c): + - All changes to date including pause requests & start paused, along with new adds for cleanups and checking of execution (#75) ([ipfs/go-graphsync#75](https://github.com/ipfs/go-graphsync/pull/75)) + - More fine grained response controls (#71) ([ipfs/go-graphsync#71](https://github.com/ipfs/go-graphsync/pull/71)) + - Refactor request execution and use IPLD SkipMe functionality for proper partial results on a request (#70) ([ipfs/go-graphsync#70](https://github.com/ipfs/go-graphsync/pull/70)) + - feat(graphsync): implement do-no-send-cids extension (#69) ([ipfs/go-graphsync#69](https://github.com/ipfs/go-graphsync/pull/69)) + - Incoming Block Hooks (#68) ([ipfs/go-graphsync#68](https://github.com/ipfs/go-graphsync/pull/68)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 10 | +12777/-4664 | 152 | +| Ingar Shu | 7 | +1318/-834 | 61 | +| dirkmc | 1 | +401/-212 | 20 | +| vyzo | 3 | +36/-6 | 3 | +| hannahhoward | 2 | +25/-0 | 3 | + +# go-fil-markets 0.3.1.1 + +Hotfix bug release to address critical issues affecting node startup + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - add locks protecting retrieval market maps (#311) ([filecoin-project/go-fil-markets#311](https://github.com/filecoin-project/go-fil-markets/pull/311)) + - fix(storagemarket): run deal restarts in go routine (#309) 
([filecoin-project/go-fil-markets#309](https://github.com/filecoin-project/go-fil-markets/pull/309)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +13/-7 | 2 | +| vyzo | 1 | +10/-0 | 1 | + +# go-fil-markets 0.3.1 + +Hotfix release to get `use addresses from miner info for connecting to miners` task merged for downstream dependencies to use + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - use addresses from miner info for connecting to miners (#290) ([filecoin-project/go-fil-markets#290](https://github.com/filecoin-project/go-fil-markets/pull/290)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Whyrusleeping | 1 | +53/-5 | 9 | + +# go-fil-markets 0.3.0 + +Deal Resumability release. We now attempt to resume storage deals when the application is shut down and restarted, and we support a more flexible deal acceptance protocol. + +### Changelog +- github.com/filecoin-project/go-fil-markets: + - fix(storagemarket): fix validator, add to test + - docs(CHANGELOG): update changelog and add detail script + - both StoredAsk and storage Provider are scoped to a single miner (#276) ([filecoin-project/go-fil-markets#276](https://github.com/filecoin-project/go-fil-markets/pull/276)) + - specs actors v0.6 (#274) ([filecoin-project/go-fil-markets#274](https://github.com/filecoin-project/go-fil-markets/pull/274)) + - Restartable storage deals (#270) ([filecoin-project/go-fil-markets#270](https://github.com/filecoin-project/go-fil-markets/pull/270)) + - replace AddAsk with SetAsk, to convey intent (#275) ([filecoin-project/go-fil-markets#275](https://github.com/filecoin-project/go-fil-markets/pull/275)) + - Allow custom decisioning for a provider to decide retrieval deals. 
(#269) ([filecoin-project/go-fil-markets#269](https://github.com/filecoin-project/go-fil-markets/pull/269)) + - Feat/module docs #83 (#267) ([filecoin-project/go-fil-markets#267](https://github.com/filecoin-project/go-fil-markets/pull/267)) + - Tentative acceptance protocol (#244) ([filecoin-project/go-fil-markets#244](https://github.com/filecoin-project/go-fil-markets/pull/244)) + - docs(CHANGELOG): update changelog for 0.2.7 ([filecoin-project/go-fil-markets#264](https://github.com/filecoin-project/go-fil-markets/pull/264)) +- github.com/filecoin-project/go-statemachine (v0.0.0-20200226041606-2074af6d51d9 -> v0.0.0-20200612181802-4eb3d0c68eba): + - Serialize notifications (#11) ([filecoin-project/go-statemachine#11](https://github.com/filecoin-project/go-statemachine/pull/11)) + - Run callback in goroutine (#10) ([filecoin-project/go-statemachine#10](https://github.com/filecoin-project/go-statemachine/pull/10)) + - Finality States ([filecoin-project/go-statemachine#9](https://github.com/filecoin-project/go-statemachine/pull/9)) + - Documentation, particularly for FSM Module (#8) ([filecoin-project/go-statemachine#8](https://github.com/filecoin-project/go-statemachine/pull/8)) + - Call stageDone on nil nextStep ([filecoin-project/go-statemachine#7](https://github.com/filecoin-project/go-statemachine/pull/7)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Ingar Shu | 4 | +1407/-695 | 35 | +| Shannon Wells | 2 | +1515/-467 | 20 | +| hannahhoward | 8 | +862/-191 | 21 | +| Hannah Howard | 1 | +263/-0 | 2 | +| Łukasz Magiera | 1 | +48/-43 | 15 | +| Erin Swenson-Healey | 2 | +39/-42 | 10 | + +# go-fil-markets 0.2.7 + +Custom Deal Decision Logic and cleanups of 0.2.6 + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update changelog for 0.2.7 + - refactor(storagemarket): remove storedask from provider (#263) 
([filecoin-project/go-fil-markets#263](https://github.com/filecoin-project/go-fil-markets/pull/263)) + - Deal Decision Custom Function (#262) ([filecoin-project/go-fil-markets#262](https://github.com/filecoin-project/go-fil-markets/pull/262)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +142/-27 | 11 | +| shannonwells | 1 | +19/-6 | 1 | + +# go-fil-markets 0.2.6 + +Remove data store wrapping + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - Feat/change prefixes 256 (#257) ([filecoin-project/go-fil-markets#257](https://github.com/filecoin-project/go-fil-markets/pull/257)) + - docs(CHANGELOG): update for 0.2.5 release ([filecoin-project/go-fil-markets#254](https://github.com/filecoin-project/go-fil-markets/pull/254)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Shannon Wells | 1 | +6/-15 | 5 | + +# go-fil-markets 0.2.5 + +go-filecoin compatibility release + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update for 0.2.5 release + - Fixes from filecoin integration work (#253) ([filecoin-project/go-fil-markets#253](https://github.com/filecoin-project/go-fil-markets/pull/253)) + - docs(CHANGELOG): update change log ([filecoin-project/go-fil-markets#250](https://github.com/filecoin-project/go-fil-markets/pull/250)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +138/-68 | 7 | +| hannahhoward | 1 | +8/-3 | 3 | + +# go-fil-markets 0.2.4 + +go-filecoin compatibility release + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update change log + - Buffer the done channel when adding storage collateral (#249) 
([filecoin-project/go-fil-markets#249](https://github.com/filecoin-project/go-fil-markets/pull/249)) + - feat(CHANGELOG): update changelog for v0.2.3 ([filecoin-project/go-fil-markets#248](https://github.com/filecoin-project/go-fil-markets/pull/248)) + - Unified request validator (#247) ([filecoin-project/go-fil-markets#247](https://github.com/filecoin-project/go-fil-markets/pull/247)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Ingar Shu | 2 | +221/-230 | 7 | +| hannahhoward | 1 | +8/-0 | 1 | + +# go-fil-markets 0.2.3 + +Hotfix release -- final fix for issues with deal streams held open + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - feat(CHANGELOG): update changelog for v0.2.3 + - feat(network): tag connections to preserve them (#246) ([filecoin-project/go-fil-markets#246](https://github.com/filecoin-project/go-fil-markets/pull/246)) + - docs(CHANGELOG): docs for 0.2.2 release ([filecoin-project/go-fil-markets#243](https://github.com/filecoin-project/go-fil-markets/pull/243)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +112/-7 | 10 | +| hannahhoward | 1 | +7/-1 | 1 | + +# go-fil-markets 0.2.2 + +Hotfix release -- updates to try to solve deal stream problems attempt #2 & v26 params update + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): docs for 0.2.2 release + - feat(storagemarket): revert protocol changes (#236) ([filecoin-project/go-fil-markets#236](https://github.com/filecoin-project/go-fil-markets/pull/236)) + - Feat/cbor gen check ci #231 (#234) ([filecoin-project/go-fil-markets#234](https://github.com/filecoin-project/go-fil-markets/pull/234)) + - update sector-storage and break transitive dependency on lotus (#235) 
([filecoin-project/go-fil-markets#235](https://github.com/filecoin-project/go-fil-markets/pull/235)) + - docs(CHANGELOG): update for 0.2.1 release ([filecoin-project/go-fil-markets#233](https://github.com/filecoin-project/go-fil-markets/pull/233)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +701/-614 | 22 | +| Erin Swenson-Healey | 1 | +5/-265 | 2 | +| Shannon Wells | 1 | +11/-0 | 1 | +| hannahhoward | 1 | +8/-1 | 1 | + +# go-fil-markets 0.2.1 + +Hotfix release -- updates to try to solve deal stream problems attempt #1 + +### Changelog +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update for 0.2.1 release + - update to v26 proofs (#232) ([filecoin-project/go-fil-markets#232](https://github.com/filecoin-project/go-fil-markets/pull/232)) + - Don't Keep Streams Open (#230) ([filecoin-project/go-fil-markets#230](https://github.com/filecoin-project/go-fil-markets/pull/230)) + - Round-trip storage/retrieval test (#229) ([filecoin-project/go-fil-markets#229](https://github.com/filecoin-project/go-fil-markets/pull/229)) + - feat(storagemarket): improve human readable constant maps (#228) ([filecoin-project/go-fil-markets#228](https://github.com/filecoin-project/go-fil-markets/pull/228)) + - fix(deps): update data-transfer 0.3.0 (#227) ([filecoin-project/go-fil-markets#227](https://github.com/filecoin-project/go-fil-markets/pull/227)) + - docs(CHANGELOG): update changelog for 0.2.0 release ([filecoin-project/go-fil-markets#226](https://github.com/filecoin-project/go-fil-markets/pull/226)) +- github.com/filecoin-project/go-data-transfer (v0.2.1 -> v0.3.0): + - feat(graphsyncimpl): fix open/close events (#52) ([filecoin-project/go-data-transfer#52](https://github.com/filecoin-project/go-data-transfer/pull/52)) + - chore(deps): update graphsync ([filecoin-project/go-data-transfer#51](https://github.com/filecoin-project/go-data-transfer/pull/51)) + - 
Refactor registry and encoding (#50) ([filecoin-project/go-data-transfer#50](https://github.com/filecoin-project/go-data-transfer/pull/50)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 5 | +1841/-1303 | 59 | +| Shannon Wells | 1 | +511/-141 | 19 | +| hannahhoward | 1 | +11/-1 | 1 | +| Erin Swenson-Healey | 1 | +1/-1 | 1 | + +# go-fil-markets 0.2.0 + +Asynchronous operations release -- we no longer synchronously wait for chain messages to push + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update changelog for 0.2.0 release + - Storage Market Changes Based On Lotus Integration (#223) ([filecoin-project/go-fil-markets#223](https://github.com/filecoin-project/go-fil-markets/pull/223)) + - Merge in hotfix 0.1.3 ([filecoin-project/go-fil-markets#225](https://github.com/filecoin-project/go-fil-markets/pull/225)) + - ppl can sub to storage client evts (#217) ([filecoin-project/go-fil-markets#217](https://github.com/filecoin-project/go-fil-markets/pull/217)) + - fix(storagemarket): set miner peer id on deals (#216) ([filecoin-project/go-fil-markets#216](https://github.com/filecoin-project/go-fil-markets/pull/216)) + - chore(release): merge hotfix 0.1.2 branch back + - docs(release): update release process (#212) ([filecoin-project/go-fil-markets#212](https://github.com/filecoin-project/go-fil-markets/pull/212)) + - Nonblocking storage deals [#80] (#194) ([filecoin-project/go-fil-markets#194](https://github.com/filecoin-project/go-fil-markets/pull/194)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Ingar Shu | 1 | +993/-608 | 13 | +| Hannah Howard | 3 | +101/-59 | 14 | +| Shannon Wells | 1 | +106/-31 | 5 | +| hannahhoward | 1 | +8/-0 | 1 | + +# go-fil-markets 0.1.3 + +Hotfix release for critical graphsync bug fix + +### Changelog + +- 
github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): add release documentation + - fix(deps): update to tagged data transfer + - chore(deps): update data transfer + graphsync +- github.com/filecoin-project/go-data-transfer (v0.2.0 -> v0.2.1): + - chore(deps): update graphsync +- github.com/ipfs/go-graphsync (v0.0.6-0.20200428204348-97a8cf76a482 -> v0.0.6-0.20200504202014-9d5f2c26a103): + - fix(responsemanager): add nil check (#67) ([ipfs/go-graphsync#67](https://github.com/ipfs/go-graphsync/pull/67)) + - Add autocomment configuration + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hector Sanjuan | 1 | +68/-0 | 1 | +| hannahhoward | 4 | +20/-12 | 7 | +| Hannah Howard | 1 | +4/-0 | 1 | + +# go-fil-markets 0.1.2 + +Hotfix release for transitive dependencies to use new go-ipld-prime + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(CHANGELOG): update changelog + - Upgrade IPLD-prime to latest (#215) ([filecoin-project/go-fil-markets#215](https://github.com/filecoin-project/go-fil-markets/pull/215)) +- github.com/filecoin-project/go-data-transfer (v0.0.0-20200408061858-82c58b423ca6 -> v0.2.0): + - Upgrade graphsync + ipld-prime (#49) ([filecoin-project/go-data-transfer#49](https://github.com/filecoin-project/go-data-transfer/pull/49)) + - Use extracted generic pubsub (#48) ([filecoin-project/go-data-transfer#48](https://github.com/filecoin-project/go-data-transfer/pull/48)) + - Refactor & Cleanup In Preparation For Added Complexity (#47) ([filecoin-project/go-data-transfer#47](https://github.com/filecoin-project/go-data-transfer/pull/47)) + - feat(graphsync): complete notifications for responder (#46) ([filecoin-project/go-data-transfer#46](https://github.com/filecoin-project/go-data-transfer/pull/46)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200408061628-e1a98fc64c42 -> v0.0.6-0.20200428204348-97a8cf76a482): + - refactor(hooks): use external pubsub 
(#65) ([ipfs/go-graphsync#65](https://github.com/ipfs/go-graphsync/pull/65)) + - Update of IPLD Prime (#66) ([ipfs/go-graphsync#66](https://github.com/ipfs/go-graphsync/pull/66)) + - Add standard issue template + - feat(responsemanager): add listener for completed responses (#64) ([ipfs/go-graphsync#64](https://github.com/ipfs/go-graphsync/pull/64)) + - Update Requests (#63) ([ipfs/go-graphsync#63](https://github.com/ipfs/go-graphsync/pull/63)) + - Add pausing and unpausing of requests (#62) ([ipfs/go-graphsync#62](https://github.com/ipfs/go-graphsync/pull/62)) + - ci(circle): remove benchmark task for now + - ci(circle): update orb + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 10 | +5409/-4023 | 151 | +| Hector Sanjuan | 1 | +27/-0 | 2 | +| hannahhoward | 3 | +16/-8 | 5 | + +# go-fil-markets 0.1.1 + +Hotfix release for spec actors update + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - chore(changelog): update changelog for tagged release + - Upgrade to specs-actors v0.3.0 (#207) ([filecoin-project/go-fil-markets#207](https://github.com/filecoin-project/go-fil-markets/pull/207)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| hannahhoward | 1 | +9/-1 | 1 | +| Alex North | 1 | +3/-3 | 2 | + +# go-fil-markets 0.1.0 + +Initial tagged release for Filecoin Testnet Phase 2 + +### Changelog + +- github.com/filecoin-project/go-fil-markets: + - docs(release): document release process (#206) ([filecoin-project/go-fil-markets#206](https://github.com/filecoin-project/go-fil-markets/pull/206)) + - update types_cbor_gen (#203) ([filecoin-project/go-fil-markets#203](https://github.com/filecoin-project/go-fil-markets/pull/203)) + - Upgrade to specs-actors v0.2.0 (#204) ([filecoin-project/go-fil-markets#204](https://github.com/filecoin-project/go-fil-markets/pull/204)) + - 
Storagemarket/provider allows subscription to events (#202) ([filecoin-project/go-fil-markets#202](https://github.com/filecoin-project/go-fil-markets/pull/202)) + - Add a test rule to Makefile, use in CI config (#200) ([filecoin-project/go-fil-markets#200](https://github.com/filecoin-project/go-fil-markets/pull/200)) + - Update to specs-actors v1.0.0 (#198) ([filecoin-project/go-fil-markets#198](https://github.com/filecoin-project/go-fil-markets/pull/198)) + - add multiple peers per payloadCID (#197) ([filecoin-project/go-fil-markets#197](https://github.com/filecoin-project/go-fil-markets/pull/197)) + - refactor(storedcounter): use extracted package (#196) ([filecoin-project/go-fil-markets#196](https://github.com/filecoin-project/go-fil-markets/pull/196)) + - Feat/no block chain ops (#190) ([filecoin-project/go-fil-markets#190](https://github.com/filecoin-project/go-fil-markets/pull/190)) + - Add a max piece size to storage asks (#188) ([filecoin-project/go-fil-markets#188](https://github.com/filecoin-project/go-fil-markets/pull/188)) + - Update proofs to v25 params (#189) ([filecoin-project/go-fil-markets#189](https://github.com/filecoin-project/go-fil-markets/pull/189)) + - Update Graphsync (#184) ([filecoin-project/go-fil-markets#184](https://github.com/filecoin-project/go-fil-markets/pull/184)) + - Support selectors on retrieval (#187) ([filecoin-project/go-fil-markets#187](https://github.com/filecoin-project/go-fil-markets/pull/187)) + - Add optional PieceCID to block unsealing (#186) ([filecoin-project/go-fil-markets#186](https://github.com/filecoin-project/go-fil-markets/pull/186)) + - Add Selector to retrieval params (#175) ([filecoin-project/go-fil-markets#175](https://github.com/filecoin-project/go-fil-markets/pull/175)) + - use PieceCID if provided in QueryParams (#181) ([filecoin-project/go-fil-markets#181](https://github.com/filecoin-project/go-fil-markets/pull/181)) + - include rejection reason in client response (#182) 
([filecoin-project/go-fil-markets#182](https://github.com/filecoin-project/go-fil-markets/pull/182)) + - Do not create CAR file when propsing a storage deal using Manual Transfer (#183) ([filecoin-project/go-fil-markets#183](https://github.com/filecoin-project/go-fil-markets/pull/183)) + - add selector to BlockIO classes (#178) ([filecoin-project/go-fil-markets#178](https://github.com/filecoin-project/go-fil-markets/pull/178)) + - rename list deals interface & impls (#174) ([filecoin-project/go-fil-markets#174](https://github.com/filecoin-project/go-fil-markets/pull/174)) + - Feat/configure start epoch buffer (#171) ([filecoin-project/go-fil-markets#171](https://github.com/filecoin-project/go-fil-markets/pull/171)) + - send tipset identifier to node when interacting with chain (#172) ([filecoin-project/go-fil-markets#172](https://github.com/filecoin-project/go-fil-markets/pull/172)) + - Support Retrieval By Any CID, Not Just Root (#166) ([filecoin-project/go-fil-markets#166](https://github.com/filecoin-project/go-fil-markets/pull/166)) + - v24 groth parameters (#167) ([filecoin-project/go-fil-markets#167](https://github.com/filecoin-project/go-fil-markets/pull/167)) + - Add TipSetToken to SavePaymentVoucher (#165) ([filecoin-project/go-fil-markets#165](https://github.com/filecoin-project/go-fil-markets/pull/165)) + - retrieval client node interface passes tipset identifier to node (#164) ([filecoin-project/go-fil-markets#164](https://github.com/filecoin-project/go-fil-markets/pull/164)) + - send state identifiery when getting miner worker address (#153) ([filecoin-project/go-fil-markets#153](https://github.com/filecoin-project/go-fil-markets/pull/153)) + - chore(deps): update to ipld/go-car (#152) ([filecoin-project/go-fil-markets#152](https://github.com/filecoin-project/go-fil-markets/pull/152)) + - add TipSet identity-producing method to various Node interfaces (#149) 
([filecoin-project/go-fil-markets#149](https://github.com/filecoin-project/go-fil-markets/pull/149)) + - conform imports to schema defined in CONTRIBUTING.md (#150) ([filecoin-project/go-fil-markets#150](https://github.com/filecoin-project/go-fil-markets/pull/150)) + - Refactor Storage Provider to FSM Module (#145) ([filecoin-project/go-fil-markets#145](https://github.com/filecoin-project/go-fil-markets/pull/145)) + - Feat/update to fix 32gib verification (#147) ([filecoin-project/go-fil-markets#147](https://github.com/filecoin-project/go-fil-markets/pull/147)) + - ci(codecov): remove cbor gen files from coverage + - ci(codecov): ignore cbor gen files (#146) ([filecoin-project/go-fil-markets#146](https://github.com/filecoin-project/go-fil-markets/pull/146)) + - Storage Client Statemachine Refactor (#136) ([filecoin-project/go-fil-markets#136](https://github.com/filecoin-project/go-fil-markets/pull/136)) + - upgrade to libfilecoin version that supports cache clearing (#138) ([filecoin-project/go-fil-markets#138](https://github.com/filecoin-project/go-fil-markets/pull/138)) + - fix(cborgen): update cbor gen for dataref (#137) ([filecoin-project/go-fil-markets#137](https://github.com/filecoin-project/go-fil-markets/pull/137)) + - allow manual piece commitment (#135) ([filecoin-project/go-fil-markets#135](https://github.com/filecoin-project/go-fil-markets/pull/135)) + - fix(retrievalmarket): handle self-retrieval correctly (#134) ([filecoin-project/go-fil-markets#134](https://github.com/filecoin-project/go-fil-markets/pull/134)) + - feat(retrievalmarket): support wallet address (#130) ([filecoin-project/go-fil-markets#130](https://github.com/filecoin-project/go-fil-markets/pull/130)) + - allow specification of 'wallet' for ensure funds calls (#129) ([filecoin-project/go-fil-markets#129](https://github.com/filecoin-project/go-fil-markets/pull/129)) + - update to filecoin-ffi with shared types (#127) 
([filecoin-project/go-fil-markets#127](https://github.com/filecoin-project/go-fil-markets/pull/127)) + - feat(sharedcounter): persist counter to disk (#125) ([filecoin-project/go-fil-markets#125](https://github.com/filecoin-project/go-fil-markets/pull/125)) + - Use go-statemachine + FSMs in retrieval market (#124) ([filecoin-project/go-fil-markets#124](https://github.com/filecoin-project/go-fil-markets/pull/124)) + - storage client: Call EnsureFunds more correctly (#123) ([filecoin-project/go-fil-markets#123](https://github.com/filecoin-project/go-fil-markets/pull/123)) + - use latest specs-actors with uint64 lane and nonce from paych.Actor (#122) ([filecoin-project/go-fil-markets#122](https://github.com/filecoin-project/go-fil-markets/pull/122)) + - Update go-sectorbuilder to latest that uses specs-actors types (#121) ([filecoin-project/go-fil-markets#121](https://github.com/filecoin-project/go-fil-markets/pull/121)) + - Import spec actor types (#118) ([filecoin-project/go-fil-markets#118](https://github.com/filecoin-project/go-fil-markets/pull/118)) + - Update README (#120) ([filecoin-project/go-fil-markets#120](https://github.com/filecoin-project/go-fil-markets/pull/120)) + - chore(cborgen): update cborgen + - Merge branch 'head/lotus' into lotus/merge-02-10-2020 + - Storage Market integration test (#119) ([filecoin-project/go-fil-markets#119](https://github.com/filecoin-project/go-fil-markets/pull/119)) + - fix(storagemarket): add back in cid recording (#115) ([filecoin-project/go-fil-markets#115](https://github.com/filecoin-project/go-fil-markets/pull/115)) + - fix(storagemarket): assign net member (#114) ([filecoin-project/go-fil-markets#114](https://github.com/filecoin-project/go-fil-markets/pull/114)) + - Fix/flaky tests (#113) ([filecoin-project/go-fil-markets#113](https://github.com/filecoin-project/go-fil-markets/pull/113)) + - Storage market network abstraction (#109) 
([filecoin-project/go-fil-markets#109](https://github.com/filecoin-project/go-fil-markets/pull/109)) + - Remove Sector ID from MinerDeal (merge from head/lotus -- PLEASE USE MERGE COMMIT) ([filecoin-project/go-fil-markets#112](https://github.com/filecoin-project/go-fil-markets/pull/112)) + - No Filestore On Storage Client (#107) ([filecoin-project/go-fil-markets#107](https://github.com/filecoin-project/go-fil-markets/pull/107)) + - take miner address as parameter (#108) ([filecoin-project/go-fil-markets#108](https://github.com/filecoin-project/go-fil-markets/pull/108)) + - skip flaky 1 block tests (#104) ([filecoin-project/go-fil-markets#104](https://github.com/filecoin-project/go-fil-markets/pull/104)) + - use go-padreader instead of local copy (#103) ([filecoin-project/go-fil-markets#103](https://github.com/filecoin-project/go-fil-markets/pull/103)) + - Handle sector id in the `OnDealSectorCommitted` callback (#58) ([filecoin-project/go-fil-markets#58](https://github.com/filecoin-project/go-fil-markets/pull/58)) + - Properly Implement Retrieval Lookups Based on CIDs (#57) ([filecoin-project/go-fil-markets#57](https://github.com/filecoin-project/go-fil-markets/pull/57)) + - Add Stop funcs to retrieval providers (#56) ([filecoin-project/go-fil-markets#56](https://github.com/filecoin-project/go-fil-markets/pull/56)) + - refactor(retrievalmarket): switch to payload CIDs (#55) ([filecoin-project/go-fil-markets#55](https://github.com/filecoin-project/go-fil-markets/pull/55)) + - Move to an explicit piecestore and explicit unsealing. 
(#54) ([filecoin-project/go-fil-markets#54](https://github.com/filecoin-project/go-fil-markets/pull/54)) + - Improve test coverage, fix any bugs (#53) ([filecoin-project/go-fil-markets#53](https://github.com/filecoin-project/go-fil-markets/pull/53)) + - Techdebt/1 block file retrieval test (#51) ([filecoin-project/go-fil-markets#51](https://github.com/filecoin-project/go-fil-markets/pull/51)) + - ci(config): use large resource_class (#52) ([filecoin-project/go-fil-markets#52](https://github.com/filecoin-project/go-fil-markets/pull/52)) + - Sync up DealState to match spec (#50) ([filecoin-project/go-fil-markets#50](https://github.com/filecoin-project/go-fil-markets/pull/50)) + - Support arbitrary dag retrieval (#46) ([filecoin-project/go-fil-markets#46](https://github.com/filecoin-project/go-fil-markets/pull/46)) + - RetrievalMarket: Query + Deal integration test, + bug fixes uncovered during writing the test (#36) ([filecoin-project/go-fil-markets#36](https://github.com/filecoin-project/go-fil-markets/pull/36)) + - Remove filestore as a go between with StorageMiner, pass direct io.reader to StorageMiner (#49) ([filecoin-project/go-fil-markets#49](https://github.com/filecoin-project/go-fil-markets/pull/49)) + - Feat/find providers (#43) ([filecoin-project/go-fil-markets#43](https://github.com/filecoin-project/go-fil-markets/pull/43)) + - Retrieval Deals, Spec V0 (#37) ([filecoin-project/go-fil-markets#37](https://github.com/filecoin-project/go-fil-markets/pull/37)) + - Lotus updates ([filecoin-project/go-fil-markets#45](https://github.com/filecoin-project/go-fil-markets/pull/45)) + - storagemarket: close channel on return (#42) ([filecoin-project/go-fil-markets#42](https://github.com/filecoin-project/go-fil-markets/pull/42)) + - Feat/verify data before publishing deal (#40) ([filecoin-project/go-fil-markets#40](https://github.com/filecoin-project/go-fil-markets/pull/40)) + - Use CAR and padding for piece data (#27) 
([filecoin-project/go-fil-markets#27](https://github.com/filecoin-project/go-fil-markets/pull/27)) + - Upgrade Query Protocol to Spec V0 (#25) ([filecoin-project/go-fil-markets#25](https://github.com/filecoin-project/go-fil-markets/pull/25)) + - Merge branch 'lotus-updates' + - fix(retrievalmarket): add mutex around subscribers (#32) (#33) ([filecoin-project/go-fil-markets#33](https://github.com/filecoin-project/go-fil-markets/pull/33)) + - ci(codecov): disable status, display report (#31) ([filecoin-project/go-fil-markets#31](https://github.com/filecoin-project/go-fil-markets/pull/31)) + - Flaky test fix (#28) ([filecoin-project/go-fil-markets#28](https://github.com/filecoin-project/go-fil-markets/pull/28)) + - skip flaky test (#30) ([filecoin-project/go-fil-markets#30](https://github.com/filecoin-project/go-fil-markets/pull/30)) + - Network Abstraction For Retrieval Market (#17) ([filecoin-project/go-fil-markets#17](https://github.com/filecoin-project/go-fil-markets/pull/17)) + - Use CAR file in generation of CommP (#26) ([filecoin-project/go-fil-markets#26](https://github.com/filecoin-project/go-fil-markets/pull/26)) + - filestore: track close err, lints (#20) ([filecoin-project/go-fil-markets#20](https://github.com/filecoin-project/go-fil-markets/pull/20)) + - Deleting datatransfer files (#19) ([filecoin-project/go-fil-markets#19](https://github.com/filecoin-project/go-fil-markets/pull/19)) + - Use shared go-filecoin packages go-cbor-util, go-address, go-crypto, (#22) ([filecoin-project/go-fil-markets#22](https://github.com/filecoin-project/go-fil-markets/pull/22)) + - Storage Market Extraction (#15) ([filecoin-project/go-fil-markets#15](https://github.com/filecoin-project/go-fil-markets/pull/15)) + - Retrieval Market Extraction (#13) ([filecoin-project/go-fil-markets#13](https://github.com/filecoin-project/go-fil-markets/pull/13)) + - PieceIO improvements (#12) ([filecoin-project/go-fil-markets#12](https://github.com/filecoin-project/go-fil-markets/pull/12)) + 
- fix links in datatransfer README (#11) ([filecoin-project/go-fil-markets#11](https://github.com/filecoin-project/go-fil-markets/pull/11)) + - fix(build): fix tools build error (#14) ([filecoin-project/go-fil-markets#14](https://github.com/filecoin-project/go-fil-markets/pull/14)) + - fix(tokenamount): fix naming (#10) ([filecoin-project/go-fil-markets#10](https://github.com/filecoin-project/go-fil-markets/pull/10)) + - feat(shared): add shared tools and types (#9) ([filecoin-project/go-fil-markets#9](https://github.com/filecoin-project/go-fil-markets/pull/9)) + - add circle config, let's ci ([filecoin-project/go-fil-markets#7](https://github.com/filecoin-project/go-fil-markets/pull/7)) + - Skeleton readme ([filecoin-project/go-fil-markets#5](https://github.com/filecoin-project/go-fil-markets/pull/5)) + - Feat/datatransfer readme, contributing, design doc (rename) + - Piece IO ([filecoin-project/go-fil-markets#2](https://github.com/filecoin-project/go-fil-markets/pull/2)) + - Feat/datatransfer graphsync movein ([filecoin-project/go-fil-markets#1](https://github.com/filecoin-project/go-fil-markets/pull/1)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 38 | +27080/-10375 | 455 | +| Ingar Shu | 10 | +1315/-6870 | 127 | +| shannonwells | 12 | +5500/-70 | 48 | +| Shannon Wells | 20 | +2671/-940 | 109 | +| ergastic | 4 | +1835/-501 | 47 | +| Erin Swenson-Healey | 9 | +516/-408 | 112 | +| hannahhoward | 10 | +497/-150 | 79 | +| Łukasz Magiera | 4 | +379/-139 | 19 | +| whyrusleeping | 3 | +239/-87 | 19 | +| Whyrusleeping | 4 | +192/-96 | 26 | +| Aayush Rajasekaran | 3 | +93/-13 | 14 | +| Mosh | 2 | +37/-8 | 2 | +| Ignacio Hagopian | 2 | +9/-11 | 2 | +| Alex North | 2 | +11/-7 | 4 | +| Alex Cruikshank | 1 | +1/-9 | 1 | + +### 🙌🏽 Want to contribute? + +Would you like to contribute to this repo and don’t know how? 
Here are a few places you can get started: + +- Check out the [Contributing Guidelines](https://github.com/filecoin-project/go-fil-markets/blob/master/CONTRIBUTING.md) +- Look for issues with the `good-first-issue` label in [go-fil-markets](https://github.com/filecoin-project/go-fil-markets/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22e-good-first-issue%22+) diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/CONTRIBUTING.md b/extern/sxx-go-fil-markets@v1.24.0-v17/CONTRIBUTING.md new file mode 100644 index 00000000000..310ebeaf680 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/CONTRIBUTING.md @@ -0,0 +1,96 @@ +# Contributing to this repo + +First, thank you for your interest in contributing to this project! Before you pick up your first issue and start +changing code, please: + +1. Review all documentation for the module you're interested in. +1. Look through the [issues for this repo](https://github.com/filecoin-project/go-fil-markets/issues) for relevant discussions. +1. If you have questions about an issue, post a comment in the issue. +1. If you want to submit changes that aren't covered by an issue, file a new one with your proposal, outlining what problem you found/feature you want to implement, and how you intend to implement a solution. + +For best results, before submitting a PR, make sure: +1. It has met all acceptance criteria for the issue. +1. It addresses only the one issue and does not make other, irrelevant changes. +1. Your code conforms to our coding style guide. +1. You have adequate test coverage (this should be indicated by CI results anyway). +1. If you like, check out [current PRs](https://github.com/filecoin-project/go-fil-markets/pulls) to see how others do it. + +Special Note: +If editing README.md, please conform to the [standard readme specification](https://github.com/RichardLitt/standard-readme/blob/master/spec.md). + +### PR Process + +Active development of `go-fil-markets` occurs on the `master` branch. 
All PRs should be made to the `master` branch, which is the default branch on Github. + +Before a PR can be merged to `master`, it must: +1. Pass continuous integration. +1. Be rebased and up to date with the `master` branch +1. Be approved by at least two maintainers + +When merging normal PRs to `master`, always use squash and merge to maintain a linear commit history. + +### Release Process + +When creating a new full release, branch off master with a branch named release/*version-number*, where *version-number* is the ultimate tag you intend to create. + +Continue to develop on master and merge commits to your release branch as necessary until the release is ready. + +When the release is ready, tag it, then merge the branch back into master so that it is part of the version history of master. Delete the release branch. + +### Hotfix Process + +Hot-fixes operate just like release branches, except they are branched off an existing tag and should be named hotfix/*version-number*. When ready, they receive their own tag and then are merged back to master, then deleted. + +For external reference, this git flow and release process is essentially the [OneFlow git workflow](https://www.endoflineblog.com/oneflow-a-git-branching-model-and-workflow) + +Following the release of Filecoin Mainnet, this library will follow a semantic versioning scheme for tagged releases. + +### Testing + +- All new code should be accompanied by unit tests. Prefer focused unit tests to integration tests for thorough validation of behaviour. Existing code is not necessarily a good model, here. +- Integration tests should test integration, not comprehensive functionality +- Tests should be placed in a separate package named `$PACKAGE_test`. For example, a test of the `chain` package should live in a package named `chain_test`. In limited situations, exceptions may be made for some "white box" tests placed in the same package as the code it tests. 
+ +### Conventions and Style + +#### Imports +We use the following import ordering. +``` +import ( + [stdlib packages, alpha-sorted] + + [external packages] + + [other-filecoin-project packages] + + [go-fil-markets packages] +) +``` + +Where a package name does not match its directory name, an explicit alias is expected (`goimports` will add this for you). + +Example: + +```go +import ( + "context" + "testing" + + cmds "github.com/ipfs/go-ipfs-cmds" + cid "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + "github.com/stretchr/testify/assert" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/filestore/file" +) +``` + +You can run `script/fiximports` to put all your code in the desired format +#### Comments +Comments are a communication to other developers (including your future self) to help them understand and maintain code. Good comments describe the _intent_ of the code, without repeating the procedures directly. + +- A `TODO:` comment describes a change that is desired but could not be immediately implemented. It must include a reference to a GitHub issue outlining whatever prevents the thing being done now (which could just be a matter of priority). +- A `NOTE:` comment indicates an aside, some background info, or ideas for future improvement, rather than the intent of the current code. It's often fine to document such ideas alongside the code rather than an issue (at the loss of a space for discussion). +- `FIXME`, `HACK`, `XXX` and similar tags indicating that some code is to be avoided in favour of `TODO`, `NOTE` or some straight prose. diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/COPYRIGHT b/extern/sxx-go-fil-markets@v1.24.0-v17/COPYRIGHT new file mode 100644 index 00000000000..771e6f7cd77 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2019. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. 
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/LICENSE-APACHE b/extern/sxx-go-fil-markets@v1.24.0-v17/LICENSE-APACHE new file mode 100644 index 00000000000..546514363d4 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/LICENSE-MIT b/extern/sxx-go-fil-markets@v1.24.0-v17/LICENSE-MIT new file mode 100644 index 00000000000..ea532a83059 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/Makefile b/extern/sxx-go-fil-markets@v1.24.0-v17/Makefile new file mode 100644 index 00000000000..148453bb63c --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/Makefile @@ -0,0 +1,67 @@ +all: build +.PHONY: all + +SUBMODULES= + +FFI_PATH:=./extern/filecoin-ffi/ +FFI_DEPS:=.install-filcrypto +FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS)) + +$(FFI_DEPS): .filecoin-build ; + +.filecoin-build: $(FFI_PATH) + $(MAKE) -C $(FFI_PATH) $(FFI_DEPS:$(FFI_PATH)%=%) + @touch $@ + +.update-modules: + git submodule update --init --recursive + @touch $@ + +build: .update-modules .filecoin-build + go build ./... + +test: build + gotestsum -- -coverprofile=coverage.txt -timeout 5m ./... + +clean: + rm -f .filecoin-build + rm -f .update-modules + rm -f coverage.txt + +DOTs=$(shell find docs -name '*.dot') +MMDs=$(shell find docs -name '*.mmd') +SVGs=$(DOTs:%=%.svg) $(MMDs:%=%.svg) +PNGs=$(DOTs:%=%.png) $(MMDs:%=%.png) + +node_modules: package.json + npm install + +diagrams: ${MMDs} ${SVGs} ${PNGs} + +%.mmd.svg: %.mmd + node_modules/.bin/mmdc -i $< -o $@ + +%.mmd.png: %.mmd + node_modules/.bin/mmdc -i $< -o $@ + +FORCE: + +docsgen: FORCE .update-modules .filecoin-build + go run ./docsgen + +$(MMDs): docsgen node_modules + +imports: FORCE + scripts/fiximports + +cbor-gen: FORCE + go generate ./... 
 + +tidy: FORCE + go mod tidy + +lint: FORCE + git fetch + golangci-lint run -v --concurrency 2 --new-from-rev origin/master + +prepare-pr: cbor-gen tidy diagrams imports lint diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/README.md b/extern/sxx-go-fil-markets@v1.24.0-v17/README.md new file mode 100644 index 00000000000..76fa8512102 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/README.md @@ -0,0 +1,60 @@ +# go-fil-markets +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![CircleCI](https://circleci.com/gh/filecoin-project/go-fil-markets.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-fil-markets) +[![codecov](https://codecov.io/gh/filecoin-project/go-fil-markets/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-fil-markets) +[![GoDoc](https://godoc.org/github.com/filecoin-project/go-fil-markets?status.svg)](https://godoc.org/github.com/filecoin-project/go-fil-markets) + +This repository contains modular implementations of the [storage and retrieval market subsystems][1] of Filecoin. +They are guided by the [v1.0 and 1.1 Filecoin specification updates](https://filecoin-project.github.io/specs/#intro__changelog). + +Separating implementations into a blockchain component and one or more mining and market components presents an opportunity to encourage implementation diversity while reusing non-security-critical components. + +## Disclaimer: Reporting issues + +This repo shares the issue tracker with lotus. Please report your issues at the [lotus issue tracker](https://github.com/filecoin-project/lotus/issues) + +## Components + +* **[storagemarket](./storagemarket)**: for finding, negotiating, and consummating deals to + store data between clients and providers (storage miners). +* **[retrievalmarket](./retrievalmarket)**: for finding, negotiating, and consummating deals to + retrieve data between clients and providers (retrieval miners). 
+* **[filestore](./filestore)**: a wrapper around os.File for use by pieceio, storagemarket, and retrievalmarket. +* **[pieceio](./pieceio)**: utilities that take IPLD graphs and turn them into pieces. Used by storagemarket. +* **[piecestore](./piecestore)**: a database for storing deal-related PieceInfo and CIDInfo. +Used by storagemarket and retrievalmarket. + +Related components in other repos: +* **[go-data-transfer](https://github.com/filecoin-project/go-data-transfer)**: for exchanging piece data between clients and miners, used by storage & retrieval market modules. + +### Background reading + +* The [Markets in Filecoin][1] +section of the Filecoin Specification contains the canonical spec. + +### Technical Documentation +* [GoDoc for Storage Market](https://godoc.org/github.com/filecoin-project/go-fil-markets/storagemarket) contains an architectural overview and robust API documentation +* [GoDoc for Retrieval Market](https://godoc.org/github.com/filecoin-project/go-fil-markets/retrievalmarket) contains an architectural overview and robust API documentation + +## Installation +```bash +go get "github.com/filecoin-project/go-fil-markets/" +``` + +## Usage +Documentation is in the README for each module, listed in [Components](#Components). + +## Contributing +Issues and PRs are welcome! Please first read the [background reading](#background-reading) and [CONTRIBUTING](CONTRIBUTING.md) guide, and look over the current code. PRs against master require approval of at least two maintainers. + +Day-to-day discussion takes place in the #fil-components channel of the [Filecoin project chat](https://github.com/filecoin-project/community#chat). Usage or design questions are welcome. 
+ +## Project-level documentation +The filecoin-project has a [community repo](https://github.com/filecoin-project/community) with more detail about our resources and policies, such as the [Code of Conduct](https://github.com/filecoin-project/community/blob/master/CODE_OF_CONDUCT.md). + +## License +This repository is dual-licensed under Apache 2.0 and MIT terms. + +Copyright 2019. Protocol Labs, Inc. + +[1]:https://spec.filecoin.io/#section-systems.filecoin_markets diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/SECURITY.md b/extern/sxx-go-fil-markets@v1.24.0-v17/SECURITY.md new file mode 100644 index 00000000000..0e810dfd2e1 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/SECURITY.md @@ -0,0 +1,9 @@ +# Security Policy + +## Reporting a Vulnerability + +For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) + +## Reporting a non security bug + +For non-critical bugs, please simply file a GitHub issue on this repo. 
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/codecov.yml b/extern/sxx-go-fil-markets@v1.24.0-v17/codecov.yml new file mode 100644 index 00000000000..30c84f38ede --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/codecov.yml @@ -0,0 +1,10 @@ +coverage: + precision: 2 + round: up + range: "50...90" + ignore: + - "**/*_cbor_gen.go" + status: + project: off + patch: off + diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/commp/commp.go b/extern/sxx-go-fil-markets@v1.24.0-v17/commp/commp.go new file mode 100644 index 00000000000..fd230562308 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/commp/commp.go @@ -0,0 +1,45 @@ +package commp + +import ( + "io" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-commp-utils/writer" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" +) + +func GenerateCommp(reader io.Reader, payloadSize uint64, targetSize uint64) (cid.Cid, error) { + // dump the CARv1 payload of the CARv2 file to the Commp Writer and get back the CommP. + w := &writer.Writer{} + written, err := io.Copy(w, reader) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to write to CommP writer: %w", err) + } + if written != int64(payloadSize) { + return cid.Undef, xerrors.Errorf("number of bytes written to CommP writer %d not equal to the CARv1 payload size %d", written, payloadSize) + } + + cidAndSize, err := w.Sum() + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get CommP: %w", err) + } + + if uint64(cidAndSize.PieceSize) < targetSize { + // need to pad up! 
+ rawPaddedCommp, err := commp.PadCommP( + // we know how long a pieceCid "hash" is, just blindly extract the trailing 32 bytes + cidAndSize.PieceCID.Hash()[len(cidAndSize.PieceCID.Hash())-32:], + uint64(cidAndSize.PieceSize), + uint64(targetSize), + ) + if err != nil { + return cid.Undef, err + } + cidAndSize.PieceCID, _ = commcid.DataCommitmentV1ToCID(rawPaddedCommp) + } + + return cidAndSize.PieceCID, err +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/discovery.go b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/discovery.go new file mode 100644 index 00000000000..d105dcc07e5 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/discovery.go @@ -0,0 +1,9 @@ +package discoveryimpl + +import ( + "github.com/filecoin-project/go-fil-markets/discovery" +) + +func Multi(r discovery.PeerResolver) discovery.PeerResolver { // TODO: actually support multiple mechanisms + return r +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/local.go b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/local.go new file mode 100644 index 00000000000..efc12c2e58c --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/local.go @@ -0,0 +1,121 @@ +package discoveryimpl + +import ( + "bytes" + "context" + + "github.com/hannahhoward/go-pubsub" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + dshelp "github.com/ipfs/go-ipfs-ds-help" + logging "github.com/ipfs/go-log/v2" + + cborutil "github.com/filecoin-project/go-cbor-util" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versionedds "github.com/filecoin-project/go-ds-versioning/pkg/datastore" + + "github.com/filecoin-project/go-fil-markets/discovery" + "github.com/filecoin-project/go-fil-markets/discovery/migrations" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared" +) + +var log = logging.Logger("retrieval-discovery") + +type Local struct { + ds 
datastore.Datastore + migrateDs func(context.Context) error + readySub *pubsub.PubSub +} + +func NewLocal(ds datastore.Batching) (*Local, error) { + migrations, err := migrations.RetrievalPeersMigrations.Build() + if err != nil { + return nil, err + } + versionedDs, migrateDs := versionedds.NewVersionedDatastore(ds, migrations, versioning.VersionKey("1")) + readySub := pubsub.New(shared.ReadyDispatcher) + return &Local{versionedDs, migrateDs, readySub}, nil +} + +func (l *Local) Start(ctx context.Context) error { + go func() { + err := l.migrateDs(ctx) + if err != nil { + log.Errorf("Migrating retrieval peers: %s", err.Error()) + } + err = l.readySub.Publish(err) + if err != nil { + log.Warnf("Publishing retrieval peers list ready event: %s", err.Error()) + } + }() + return nil +} + +// OnReady registers a listener for when the retrieval peers list has finished starting up +func (l *Local) OnReady(ready shared.ReadyFunc) { + l.readySub.Subscribe(ready) +} + +func (l *Local) AddPeer(ctx context.Context, cid cid.Cid, peer retrievalmarket.RetrievalPeer) error { + key := dshelp.MultihashToDsKey(cid.Hash()) + exists, err := l.ds.Has(ctx, key) + if err != nil { + return err + } + + var newRecord bytes.Buffer + + if !exists { + peers := discovery.RetrievalPeers{Peers: []retrievalmarket.RetrievalPeer{peer}} + err = cborutil.WriteCborRPC(&newRecord, &peers) + if err != nil { + return err + } + } else { + entry, err := l.ds.Get(ctx, key) + if err != nil { + return err + } + var peers discovery.RetrievalPeers + if err = cborutil.ReadCborRPC(bytes.NewReader(entry), &peers); err != nil { + return err + } + if hasPeer(peers, peer) { + return nil + } + peers.Peers = append(peers.Peers, peer) + err = cborutil.WriteCborRPC(&newRecord, &peers) + if err != nil { + return err + } + } + + return l.ds.Put(ctx, key, newRecord.Bytes()) +} + +func hasPeer(peerList discovery.RetrievalPeers, peer retrievalmarket.RetrievalPeer) bool { + for _, p := range peerList.Peers { + if p == peer { + 
return true + } + } + return false +} + +func (l *Local) GetPeers(payloadCID cid.Cid) ([]retrievalmarket.RetrievalPeer, error) { + entry, err := l.ds.Get(context.TODO(), dshelp.MultihashToDsKey(payloadCID.Hash())) + if err == datastore.ErrNotFound { + return []retrievalmarket.RetrievalPeer{}, nil + } + if err != nil { + return nil, err + } + var peers discovery.RetrievalPeers + if err := cborutil.ReadCborRPC(bytes.NewReader(entry), &peers); err != nil { + return nil, err + } + return peers.Peers, nil +} + +var _ discovery.PeerResolver = &Local{} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/local_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/local_test.go new file mode 100644 index 00000000000..0ba49830152 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/impl/local_test.go @@ -0,0 +1,126 @@ +package discoveryimpl_test + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + dshelp "github.com/ipfs/go-ipfs-ds-help" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + + discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" + "github.com/filecoin-project/go-fil-markets/discovery/migrations" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + retrievalmigrations "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestLocal_AddPeer(t *testing.T) { + ctx := context.Background() + peer1 := retrievalmarket.RetrievalPeer{ + Address: shared_testutil.NewIDAddr(t, 1), + ID: peer.NewPeerRecord().PeerID, + PieceCID: nil, + } + pieceCid := shared_testutil.GenerateCids(1)[0] + peer2 := retrievalmarket.RetrievalPeer{ + Address: shared_testutil.NewIDAddr(t, 2), + ID: peer.NewPeerRecord().PeerID, + PieceCID: &pieceCid, + } + testCases := []struct { + name 
string + peers2add []retrievalmarket.RetrievalPeer + expPeers []retrievalmarket.RetrievalPeer + }{ + { + name: "can add 3 peers", + peers2add: []retrievalmarket.RetrievalPeer{peer1, peer2}, + expPeers: []retrievalmarket.RetrievalPeer{peer1, peer2}, + }, + { + name: "can add same peer without duping", + peers2add: []retrievalmarket.RetrievalPeer{peer1, peer1}, + expPeers: []retrievalmarket.RetrievalPeer{peer1}, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ds := datastore.NewMapDatastore() + l, err := discoveryimpl.NewLocal(ds) + require.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, l) + + payloadCID := shared_testutil.GenerateCids(1)[0] + for _, testpeer := range tc.peers2add { + require.NoError(t, l.AddPeer(ctx, payloadCID, testpeer)) + } + actualPeers, err := l.GetPeers(payloadCID) + require.NoError(t, err) + assert.Equal(t, len(tc.expPeers), len(actualPeers)) + assert.Equal(t, tc.expPeers[0], actualPeers[0]) + }) + } +} + +func TestLocalMigrations(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ds := datastore.NewMapDatastore() + + peers := shared_testutil.GeneratePeers(4) + pieceCIDs := shared_testutil.GenerateCids(4) + payloadCids := shared_testutil.GenerateCids(2) + for i, c := range payloadCids { + rps := migrations.RetrievalPeers0{ + Peers: []retrievalmigrations.RetrievalPeer0{ + { + Address: address.TestAddress, + ID: peers[i*2], + PieceCID: &pieceCIDs[i*2], + }, + { + Address: address.TestAddress2, + ID: peers[i*2+1], + PieceCID: &pieceCIDs[i*2+1], + }, + }, + } + buf := new(bytes.Buffer) + err := rps.MarshalCBOR(buf) + require.NoError(t, err) + err = ds.Put(ctx, dshelp.MultihashToDsKey(c.Hash()), buf.Bytes()) + require.NoError(t, err) + } + + l, err := discoveryimpl.NewLocal(ds) + require.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, l) + + 
for i, c := range payloadCids { + expectedPeers := []retrievalmarket.RetrievalPeer{ + { + Address: address.TestAddress, + ID: peers[i*2], + PieceCID: &pieceCIDs[i*2], + }, + { + Address: address.TestAddress2, + ID: peers[i*2+1], + PieceCID: &pieceCIDs[i*2+1], + }, + } + peers, err := l.GetPeers(c) + require.NoError(t, err) + require.Equal(t, expectedPeers, peers) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/migrations/migrations.go b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/migrations/migrations.go new file mode 100644 index 00000000000..bd65cc517e1 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/migrations/migrations.go @@ -0,0 +1,37 @@ +package migrations + +import ( + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" + + "github.com/filecoin-project/go-fil-markets/discovery" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" +) + +//go:generate cbor-gen-for RetrievalPeers0 + +// RetrievalPeers0 is version 0 of RetrievalPeers +type RetrievalPeers0 struct { + Peers []migrations.RetrievalPeer0 +} + +// MigrateRetrievalPeers0To1 migrates a tuple encoded list of retrieval peers to a map encoded list +func MigrateRetrievalPeers0To1(oldRps *RetrievalPeers0) (*discovery.RetrievalPeers, error) { + peers := make([]retrievalmarket.RetrievalPeer, 0, len(oldRps.Peers)) + for _, oldRp := range oldRps.Peers { + peers = append(peers, retrievalmarket.RetrievalPeer{ + Address: oldRp.Address, + ID: oldRp.ID, + PieceCID: oldRp.PieceCID, + }) + } + return &discovery.RetrievalPeers{ + Peers: peers, + }, nil +} + +// RetrievalPeersMigrations are migrations for the store local discovery list of peers we can retrieve from +var RetrievalPeersMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateRetrievalPeers0To1, versioning.VersionKey("1")), +} diff --git 
a/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/migrations/migrations_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/migrations/migrations_cbor_gen.go new file mode 100644 index 00000000000..6dc5e2b5ae8 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/migrations/migrations_cbor_gen.go @@ -0,0 +1,105 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + migrations "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufRetrievalPeers0 = []byte{129} + +func (t *RetrievalPeers0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufRetrievalPeers0); err != nil { + return err + } + + // t.Peers ([]migrations.RetrievalPeer0) (slice) + if len(t.Peers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Peers was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Peers))); err != nil { + return err + } + for _, v := range t.Peers { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *RetrievalPeers0) UnmarshalCBOR(r io.Reader) (err error) { + *t = RetrievalPeers0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Peers ([]migrations.RetrievalPeer0) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + 
+ if extra > cbg.MaxLength { + return fmt.Errorf("t.Peers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Peers = make([]migrations.RetrievalPeer0, extra) + } + + for i := 0; i < int(extra); i++ { + + var v migrations.RetrievalPeer0 + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Peers[i] = v + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/types.go new file mode 100644 index 00000000000..00f909c76e5 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/types.go @@ -0,0 +1,19 @@ +package discovery + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +//go:generate cbor-gen-for --map-encoding RetrievalPeers + +// RetrievalPeers is a convenience struct for encoding slices of RetrievalPeer +type RetrievalPeers struct { + Peers []retrievalmarket.RetrievalPeer +} + +// PeerResolver is an interface for looking up providers that may have a piece +type PeerResolver interface { + GetPeers(payloadCID cid.Cid) ([]retrievalmarket.RetrievalPeer, error) // TODO: channel +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/types_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/types_cbor_gen.go new file mode 100644 index 00000000000..22c625e9015 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/discovery/types_cbor_gen.go @@ -0,0 +1,136 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package discovery + +import ( + "fmt" + "io" + "math" + "sort" + + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *RetrievalPeers) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Peers ([]retrievalmarket.RetrievalPeer) (slice) + if len("Peers") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Peers\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Peers"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Peers")); err != nil { + return err + } + + if len(t.Peers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Peers was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Peers))); err != nil { + return err + } + for _, v := range t.Peers { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *RetrievalPeers) UnmarshalCBOR(r io.Reader) (err error) { + *t = RetrievalPeers{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("RetrievalPeers: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Peers ([]retrievalmarket.RetrievalPeer) (slice) + case "Peers": + + maj, extra, err = 
cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Peers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Peers = make([]retrievalmarket.RetrievalPeer, extra) + } + + for i := 0; i < int(extra); i++ { + + var v retrievalmarket.RetrievalPeer + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Peers[i] = v + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd new file mode 100644 index 00000000000..cc9512057f6 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd @@ -0,0 +1,185 @@ +stateDiagram-v2 + state "DealStatusNew" as DealStatusNew + state "DealStatusWaitForAcceptance" as DealStatusWaitForAcceptance + state "DealStatusPaymentChannelCreating" as DealStatusPaymentChannelCreating + state "DealStatusPaymentChannelAddingFunds" as DealStatusPaymentChannelAddingFunds + state "DealStatusAccepted" as DealStatusAccepted + state "DealStatusFailing" as DealStatusFailing + state "DealStatusRejected" as DealStatusRejected + state "DealStatusFundsNeeded" as DealStatusFundsNeeded + state "DealStatusSendFunds" as DealStatusSendFunds + state "DealStatusSendFundsLastPayment" as DealStatusSendFundsLastPayment + state "DealStatusOngoing" as DealStatusOngoing + state "DealStatusFundsNeededLastPayment" as DealStatusFundsNeededLastPayment + state "DealStatusCompleted" as DealStatusCompleted + state "DealStatusDealNotFound" as DealStatusDealNotFound + state "DealStatusErrored" as DealStatusErrored + state "DealStatusBlocksComplete" as DealStatusBlocksComplete + state "DealStatusFinalizing" as DealStatusFinalizing + state "DealStatusCheckComplete" as DealStatusCheckComplete + state 
"DealStatusCheckFunds" as DealStatusCheckFunds + state "DealStatusInsufficientFunds" as DealStatusInsufficientFunds + state "DealStatusPaymentChannelAllocatingLane" as DealStatusPaymentChannelAllocatingLane + state "DealStatusCancelling" as DealStatusCancelling + state "DealStatusCancelled" as DealStatusCancelled + state "DealStatusRetryLegacy" as DealStatusRetryLegacy + state "DealStatusWaitForAcceptanceLegacy" as DealStatusWaitForAcceptanceLegacy + state "DealStatusWaitingForLastBlocks" as DealStatusWaitingForLastBlocks + state "DealStatusPaymentChannelAddingInitialFunds" as DealStatusPaymentChannelAddingInitialFunds + state "DealStatusErroring" as DealStatusErroring + state "DealStatusRejecting" as DealStatusRejecting + state "DealStatusDealNotFoundCleanup" as DealStatusDealNotFoundCleanup + state "DealStatusFinalizingBlockstore" as DealStatusFinalizingBlockstore + DealStatusNew : On entry runs ProposeDeal + DealStatusPaymentChannelCreating : On entry runs WaitPaymentChannelReady + DealStatusPaymentChannelAddingFunds : On entry runs WaitPaymentChannelReady + DealStatusAccepted : On entry runs SetupPaymentChannelStart + DealStatusFailing : On entry runs CancelDeal + DealStatusFundsNeeded : On entry runs ProcessPaymentRequested + DealStatusSendFunds : On entry runs SendFunds + DealStatusSendFundsLastPayment : On entry runs SendFunds + DealStatusOngoing : On entry runs Ongoing + DealStatusFundsNeededLastPayment : On entry runs ProcessPaymentRequested + DealStatusCheckComplete : On entry runs CheckComplete + DealStatusCheckFunds : On entry runs CheckFunds + DealStatusPaymentChannelAllocatingLane : On entry runs AllocateLane + DealStatusCancelling : On entry runs CancelDeal + DealStatusRetryLegacy : On entry runs ProposeDeal + DealStatusPaymentChannelAddingInitialFunds : On entry runs WaitPaymentChannelReady + DealStatusErroring : On entry runs FailsafeFinalizeBlockstore + DealStatusRejecting : On entry runs FailsafeFinalizeBlockstore + DealStatusDealNotFoundCleanup 
: On entry runs FailsafeFinalizeBlockstore + DealStatusFinalizingBlockstore : On entry runs FinalizeBlockstore + [*] --> DealStatusNew + note right of DealStatusNew + The following events are not shown cause they can trigger from any state. + + ClientEventWriteDealProposalErrored - transitions state to DealStatusErroring + ClientEventUnknownResponseReceived - transitions state to DealStatusFailing + ClientEventDataTransferError - transitions state to DealStatusErroring + ClientEventWriteDealPaymentErrored - transitions state to DealStatusErroring + ClientEventProviderCancelled - transitions state to DealStatusCancelling + ClientEventCancel - transitions state to DealStatusCancelling + end note + DealStatusNew --> DealStatusNew : ClientEventOpen + DealStatusNew --> DealStatusWaitForAcceptance : ClientEventDealProposed + DealStatusRetryLegacy --> DealStatusWaitForAcceptanceLegacy : ClientEventDealProposed + DealStatusWaitForAcceptance --> DealStatusRetryLegacy : ClientEventDealRejected + DealStatusWaitForAcceptanceLegacy --> DealStatusRejecting : ClientEventDealRejected + DealStatusWaitForAcceptance --> DealStatusDealNotFoundCleanup : ClientEventDealNotFound + DealStatusWaitForAcceptanceLegacy --> DealStatusDealNotFoundCleanup : ClientEventDealNotFound + DealStatusWaitForAcceptance --> DealStatusAccepted : ClientEventDealAccepted + DealStatusWaitForAcceptanceLegacy --> DealStatusAccepted : ClientEventDealAccepted + DealStatusPaymentChannelCreating --> DealStatusFailing : ClientEventPaymentChannelErrored + DealStatusPaymentChannelAddingFunds --> DealStatusFailing : ClientEventPaymentChannelErrored + DealStatusAccepted --> DealStatusFailing : ClientEventPaymentChannelErrored + DealStatusAccepted --> DealStatusOngoing : ClientEventPaymentChannelSkip + DealStatusAccepted --> DealStatusPaymentChannelCreating : ClientEventPaymentChannelCreateInitiated + DealStatusAccepted --> DealStatusPaymentChannelAddingInitialFunds : ClientEventPaymentChannelAddingFunds + 
DealStatusCheckFunds --> DealStatusPaymentChannelAddingFunds : ClientEventPaymentChannelAddingFunds + DealStatusPaymentChannelCreating --> DealStatusPaymentChannelAllocatingLane : ClientEventPaymentChannelReady + DealStatusPaymentChannelAddingFunds --> DealStatusOngoing : ClientEventPaymentChannelReady + DealStatusAccepted --> DealStatusPaymentChannelAllocatingLane : ClientEventPaymentChannelReady + DealStatusCheckFunds --> DealStatusOngoing : ClientEventPaymentChannelReady + DealStatusPaymentChannelAddingInitialFunds --> DealStatusPaymentChannelAllocatingLane : ClientEventPaymentChannelReady + DealStatusPaymentChannelAllocatingLane --> DealStatusFailing : ClientEventAllocateLaneErrored + DealStatusPaymentChannelAllocatingLane --> DealStatusOngoing : ClientEventLaneAllocated + DealStatusFundsNeeded --> DealStatusFundsNeededLastPayment : ClientEventLastPaymentRequested + DealStatusSendFunds --> DealStatusOngoing : ClientEventLastPaymentRequested + DealStatusOngoing --> DealStatusFundsNeededLastPayment : ClientEventLastPaymentRequested + DealStatusFundsNeededLastPayment --> DealStatusFundsNeededLastPayment : ClientEventLastPaymentRequested + DealStatusBlocksComplete --> DealStatusSendFundsLastPayment : ClientEventLastPaymentRequested + DealStatusCheckComplete --> DealStatusCheckComplete : ClientEventLastPaymentRequested + DealStatusFundsNeeded --> DealStatusFundsNeeded : ClientEventPaymentRequested + DealStatusSendFunds --> DealStatusOngoing : ClientEventPaymentRequested + DealStatusOngoing --> DealStatusFundsNeeded : ClientEventPaymentRequested + DealStatusFundsNeededLastPayment --> DealStatusFundsNeeded : ClientEventPaymentRequested + DealStatusBlocksComplete --> DealStatusFundsNeeded : ClientEventPaymentRequested + DealStatusCheckComplete --> DealStatusCheckComplete : ClientEventPaymentRequested + DealStatusWaitForAcceptance --> DealStatusAccepted : ClientEventUnsealPaymentRequested + DealStatusWaitForAcceptanceLegacy --> DealStatusAccepted : 
ClientEventUnsealPaymentRequested + DealStatusFundsNeeded --> DealStatusFundsNeeded : ClientEventAllBlocksReceived + DealStatusSendFunds --> DealStatusOngoing : ClientEventAllBlocksReceived + DealStatusSendFundsLastPayment --> DealStatusOngoing : ClientEventAllBlocksReceived + DealStatusOngoing --> DealStatusBlocksComplete : ClientEventAllBlocksReceived + DealStatusFundsNeededLastPayment --> DealStatusSendFundsLastPayment : ClientEventAllBlocksReceived + DealStatusBlocksComplete --> DealStatusBlocksComplete : ClientEventAllBlocksReceived + DealStatusCheckComplete --> DealStatusFinalizingBlockstore : ClientEventAllBlocksReceived + DealStatusWaitingForLastBlocks --> DealStatusFinalizingBlockstore : ClientEventAllBlocksReceived + DealStatusFundsNeeded --> DealStatusFundsNeeded : ClientEventBlocksReceived + DealStatusSendFunds --> DealStatusOngoing : ClientEventBlocksReceived + DealStatusSendFundsLastPayment --> DealStatusOngoing : ClientEventBlocksReceived + DealStatusOngoing --> DealStatusOngoing : ClientEventBlocksReceived + DealStatusFundsNeededLastPayment --> DealStatusFundsNeededLastPayment : ClientEventBlocksReceived + DealStatusCheckComplete --> DealStatusCheckComplete : ClientEventBlocksReceived + DealStatusWaitingForLastBlocks --> DealStatusWaitingForLastBlocks : ClientEventBlocksReceived + DealStatusFundsNeeded --> DealStatusSendFunds : ClientEventSendFunds + DealStatusSendFunds --> DealStatusOngoing : ClientEventSendFunds + DealStatusSendFundsLastPayment --> DealStatusOngoing : ClientEventSendFunds + DealStatusFundsNeededLastPayment --> DealStatusSendFundsLastPayment : ClientEventSendFunds + DealStatusCheckFunds --> DealStatusInsufficientFunds : ClientEventFundsExpended + DealStatusSendFunds --> DealStatusFailing : ClientEventBadPaymentRequested + DealStatusSendFundsLastPayment --> DealStatusFailing : ClientEventBadPaymentRequested + DealStatusSendFunds --> DealStatusFailing : ClientEventCreateVoucherFailed + DealStatusSendFundsLastPayment --> 
DealStatusFailing : ClientEventCreateVoucherFailed + DealStatusSendFunds --> DealStatusCheckFunds : ClientEventVoucherShortfall + DealStatusSendFundsLastPayment --> DealStatusCheckFunds : ClientEventVoucherShortfall + DealStatusSendFunds --> DealStatusOngoing : ClientEventPaymentNotSent + DealStatusSendFundsLastPayment --> DealStatusFinalizing : ClientEventPaymentNotSent + DealStatusFundsNeeded --> DealStatusOngoing : ClientEventPaymentSent + DealStatusSendFunds --> DealStatusOngoing : ClientEventPaymentSent + DealStatusSendFundsLastPayment --> DealStatusFinalizing : ClientEventPaymentSent + DealStatusFundsNeededLastPayment --> DealStatusOngoing : ClientEventPaymentSent + DealStatusBlocksComplete --> DealStatusCheckComplete : ClientEventPaymentSent + DealStatusCheckComplete --> DealStatusCheckComplete : ClientEventPaymentSent + DealStatusFundsNeeded --> DealStatusCheckComplete : ClientEventComplete + DealStatusSendFunds --> DealStatusCheckComplete : ClientEventComplete + DealStatusSendFundsLastPayment --> DealStatusCheckComplete : ClientEventComplete + DealStatusOngoing --> DealStatusCheckComplete : ClientEventComplete + DealStatusFundsNeededLastPayment --> DealStatusCheckComplete : ClientEventComplete + DealStatusBlocksComplete --> DealStatusCheckComplete : ClientEventComplete + DealStatusFinalizing --> DealStatusFinalizingBlockstore : ClientEventComplete + DealStatusCheckComplete --> DealStatusFinalizingBlockstore : ClientEventCompleteVerified + DealStatusCheckComplete --> DealStatusErroring : ClientEventEarlyTermination + DealStatusCheckComplete --> DealStatusWaitingForLastBlocks : ClientEventWaitForLastBlocks + DealStatusErroring --> DealStatusErrored : ClientEventBlockstoreFinalized + DealStatusRejecting --> DealStatusRejected : ClientEventBlockstoreFinalized + DealStatusDealNotFoundCleanup --> DealStatusDealNotFound : ClientEventBlockstoreFinalized + DealStatusFinalizingBlockstore --> DealStatusCompleted : ClientEventBlockstoreFinalized + 
DealStatusFinalizingBlockstore --> DealStatusErrored : ClientEventFinalizeBlockstoreErrored + DealStatusFailing --> DealStatusErrored : ClientEventCancelComplete + DealStatusCancelling --> DealStatusCancelled : ClientEventCancelComplete + DealStatusInsufficientFunds --> DealStatusCheckFunds : ClientEventRecheckFunds + + note left of DealStatusWaitForAcceptance : The following events only record in this state.

ClientEventLastPaymentRequested
ClientEventPaymentRequested
ClientEventAllBlocksReceived
ClientEventBlocksReceived + + + note left of DealStatusPaymentChannelCreating : The following events only record in this state.

ClientEventLastPaymentRequested
ClientEventPaymentRequested
ClientEventAllBlocksReceived
ClientEventBlocksReceived + + + note left of DealStatusAccepted : The following events only record in this state.

ClientEventLastPaymentRequested
ClientEventPaymentRequested
ClientEventAllBlocksReceived
ClientEventBlocksReceived + + + note left of DealStatusFailing : The following events only record in this state.

ClientEventProviderCancelled + + + note left of DealStatusOngoing : The following events only record in this state.

ClientEventPaymentNotSent
ClientEventPaymentSent + + + note left of DealStatusCompleted : The following events only record in this state.

ClientEventWaitForLastBlocks + + + note left of DealStatusPaymentChannelAllocatingLane : The following events only record in this state.

ClientEventLastPaymentRequested
ClientEventPaymentRequested
ClientEventAllBlocksReceived
ClientEventBlocksReceived + + + note left of DealStatusCancelling : The following events only record in this state.

ClientEventDealProposed
ClientEventProviderCancelled + + + note left of DealStatusWaitForAcceptanceLegacy : The following events only record in this state.

ClientEventLastPaymentRequested
ClientEventPaymentRequested
ClientEventAllBlocksReceived
ClientEventBlocksReceived + + + note left of DealStatusPaymentChannelAddingInitialFunds : The following events only record in this state.

ClientEventLastPaymentRequested
ClientEventPaymentRequested
ClientEventAllBlocksReceived
ClientEventBlocksReceived + + + note left of DealStatusFinalizingBlockstore : The following events only record in this state.

ClientEventWaitForLastBlocks + diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd.png b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd.png new file mode 100644 index 00000000000..8e8dd898c87 Binary files /dev/null and b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd.png differ diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd.svg b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd.svg new file mode 100644 index 00000000000..260804b54a0 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalclient.mmd.svg @@ -0,0 +1,6 @@ +ClientEventOpenClientEventDealProposedClientEventDealProposedClientEventDealRejectedClientEventDealRejectedClientEventDealNotFoundClientEventDealNotFoundClientEventDealAcceptedClientEventDealAcceptedClientEventPaymentChannelErroredClientEventPaymentChannelErroredClientEventPaymentChannelErroredClientEventPaymentChannelSkipClientEventPaymentChannelCreateInitiatedClientEventPaymentChannelAddingFundsClientEventPaymentChannelAddingFundsClientEventPaymentChannelReadyClientEventPaymentChannelReadyClientEventPaymentChannelReadyClientEventPaymentChannelReadyClientEventPaymentChannelReadyClientEventAllocateLaneErroredClientEventLaneAllocatedClientEventLastPaymentRequestedClientEventLastPaymentRequestedClientEventLastPaymentRequestedClientEventLastPaymentRequestedClientEventLastPaymentRequestedClientEventLastPaymentRequestedClientEventPaymentRequestedClientEventPaymentRequestedClientEventPaymentRequestedClientEventPaymentRequestedClientEventPaymentRequestedClientEventPaymentRequestedClientEventUnsealPaymentRequestedClientEventUnsealPaymentRequestedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventBlocksReceivedClientEventBlocksReceivedClientEventBlocksReceivedClientE
ventBlocksReceivedClientEventBlocksReceivedClientEventBlocksReceivedClientEventBlocksReceivedClientEventSendFundsClientEventSendFundsClientEventSendFundsClientEventSendFundsClientEventFundsExpendedClientEventBadPaymentRequestedClientEventBadPaymentRequestedClientEventCreateVoucherFailedClientEventCreateVoucherFailedClientEventVoucherShortfallClientEventVoucherShortfallClientEventPaymentNotSentClientEventPaymentNotSentClientEventPaymentSentClientEventPaymentSentClientEventPaymentSentClientEventPaymentSentClientEventPaymentSentClientEventPaymentSentClientEventCompleteClientEventCompleteClientEventCompleteClientEventCompleteClientEventCompleteClientEventCompleteClientEventCompleteClientEventCompleteVerifiedClientEventEarlyTerminationClientEventWaitForLastBlocksClientEventBlockstoreFinalizedClientEventBlockstoreFinalizedClientEventBlockstoreFinalizedClientEventBlockstoreFinalizedClientEventFinalizeBlockstoreErroredClientEventCancelCompleteClientEventCancelCompleteClientEventRecheckFundsDealStatusNewOn entry runs ProposeDealDealStatusWaitForAcceptanceDealStatusPaymentChannelCreatingOn entry runs WaitPaymentChannelReadyDealStatusPaymentChannelAddingFundsOn entry runs WaitPaymentChannelReadyDealStatusAcceptedOn entry runs SetupPaymentChannelStartDealStatusFailingOn entry runs CancelDealDealStatusRejectedDealStatusFundsNeededOn entry runs ProcessPaymentRequestedDealStatusSendFundsOn entry runs SendFundsDealStatusSendFundsLastPaymentOn entry runs SendFundsDealStatusOngoingOn entry runs OngoingDealStatusFundsNeededLastPaymentOn entry runs ProcessPaymentRequestedDealStatusCompletedDealStatusDealNotFoundDealStatusErroredDealStatusBlocksCompleteDealStatusFinalizingDealStatusCheckCompleteOn entry runs CheckCompleteDealStatusCheckFundsOn entry runs CheckFundsDealStatusInsufficientFundsDealStatusPaymentChannelAllocatingLaneOn entry runs AllocateLaneDealStatusCancellingOn entry runs CancelDealDealStatusCancelledDealStatusRetryLegacyOn entry runs 
ProposeDealDealStatusWaitForAcceptanceLegacyDealStatusWaitingForLastBlocksDealStatusPaymentChannelAddingInitialFundsOn entry runs WaitPaymentChannelReadyDealStatusErroringOn entry runs FailsafeFinalizeBlockstoreDealStatusRejectingOn entry runs FailsafeFinalizeBlockstoreDealStatusDealNotFoundCleanupOn entry runs FailsafeFinalizeBlockstoreDealStatusFinalizingBlockstoreOn entry runs FinalizeBlockstoreThe following events are not shown cause they can trigger from any state.ClientEventWriteDealProposalErrored - transitions state to DealStatusErroringClientEventUnknownResponseReceived - transitions state to DealStatusFailingClientEventDataTransferError - transitions state to DealStatusErroringClientEventWriteDealPaymentErrored - transitions state to DealStatusErroringClientEventProviderCancelled - transitions state to DealStatusCancellingClientEventCancel - transitions state to DealStatusCancellingThe following events only record in this state.ClientEventLastPaymentRequestedClientEventPaymentRequestedClientEventAllBlocksReceivedClientEventBlocksReceivedThe following events only record in this state.ClientEventLastPaymentRequestedClientEventPaymentRequestedClientEventAllBlocksReceivedClientEventBlocksReceivedThe following events only record in this state.ClientEventLastPaymentRequestedClientEventPaymentRequestedClientEventAllBlocksReceivedClientEventBlocksReceivedThe following events only record in this state.ClientEventProviderCancelledThe following events only record in this state.ClientEventPaymentNotSentClientEventPaymentSentThe following events only record in this state.ClientEventWaitForLastBlocksThe following events only record in this state.ClientEventLastPaymentRequestedClientEventPaymentRequestedClientEventAllBlocksReceivedClientEventBlocksReceivedThe following events only record in this state.ClientEventDealProposedClientEventProviderCancelledThe following events only record in this 
state.ClientEventLastPaymentRequestedClientEventPaymentRequestedClientEventAllBlocksReceivedClientEventBlocksReceivedThe following events only record in this state.ClientEventLastPaymentRequestedClientEventPaymentRequestedClientEventAllBlocksReceivedClientEventBlocksReceivedThe following events only record in this state.ClientEventWaitForLastBlocks \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd new file mode 100644 index 00000000000..6caac383a6c --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd @@ -0,0 +1,72 @@ +stateDiagram-v2 + state "DealStatusNew" as DealStatusNew + state "DealStatusUnsealing" as DealStatusUnsealing + state "DealStatusUnsealed" as DealStatusUnsealed + state "DealStatusFundsNeededUnseal" as DealStatusFundsNeededUnseal + state "DealStatusFailing" as DealStatusFailing + state "DealStatusFundsNeeded" as DealStatusFundsNeeded + state "DealStatusOngoing" as DealStatusOngoing + state "DealStatusFundsNeededLastPayment" as DealStatusFundsNeededLastPayment + state "DealStatusCompleted" as DealStatusCompleted + state "DealStatusErrored" as DealStatusErrored + state "DealStatusBlocksComplete" as DealStatusBlocksComplete + state "DealStatusFinalizing" as DealStatusFinalizing + state "DealStatusCompleting" as DealStatusCompleting + state "DealStatusCancelling" as DealStatusCancelling + state "DealStatusCancelled" as DealStatusCancelled + DealStatusUnsealing : On entry runs UnsealData + DealStatusUnsealed : On entry runs UnpauseDeal + DealStatusFundsNeededUnseal : On entry runs TrackTransfer + DealStatusFailing : On entry runs CancelDeal + DealStatusCompleting : On entry runs CleanupDeal + DealStatusCancelling : On entry runs CancelDeal + [*] --> DealStatusNew + note right of DealStatusNew + The following events are not shown cause they can trigger from any state. 
+ + ProviderEventDataTransferError - transitions state to DealStatusErrored + ProviderEventMultiStoreError - transitions state to DealStatusErrored + ProviderEventClientCancelled - transitions state to DealStatusCancelling + end note + DealStatusNew --> DealStatusNew : ProviderEventOpen + DealStatusNew --> DealStatusUnsealing : ProviderEventDealAccepted + DealStatusFundsNeededUnseal --> DealStatusFundsNeededUnseal : ProviderEventDealAccepted + DealStatusUnsealing --> DealStatusFailing : ProviderEventUnsealError + DealStatusUnsealing --> DealStatusUnsealed : ProviderEventUnsealComplete + DealStatusUnsealed --> DealStatusOngoing : ProviderEventBlockSent + DealStatusOngoing --> DealStatusOngoing : ProviderEventBlockSent + DealStatusOngoing --> DealStatusBlocksComplete : ProviderEventBlocksCompleted + DealStatusNew --> DealStatusFundsNeededUnseal : ProviderEventPaymentRequested + DealStatusUnsealed --> DealStatusFundsNeeded : ProviderEventPaymentRequested + DealStatusOngoing --> DealStatusFundsNeeded : ProviderEventPaymentRequested + DealStatusBlocksComplete --> DealStatusFundsNeededLastPayment : ProviderEventPaymentRequested + DealStatusFundsNeeded --> DealStatusFailing : ProviderEventSaveVoucherFailed + DealStatusFundsNeededLastPayment --> DealStatusFailing : ProviderEventSaveVoucherFailed + DealStatusFundsNeeded --> DealStatusFundsNeeded : ProviderEventPartialPaymentReceived + DealStatusFundsNeededLastPayment --> DealStatusFundsNeededLastPayment : ProviderEventPartialPaymentReceived + DealStatusFundsNeededUnseal --> DealStatusUnsealing : ProviderEventPaymentReceived + DealStatusFundsNeeded --> DealStatusOngoing : ProviderEventPaymentReceived + DealStatusFundsNeededLastPayment --> DealStatusFinalizing : ProviderEventPaymentReceived + DealStatusBlocksComplete --> DealStatusCompleting : ProviderEventComplete + DealStatusFinalizing --> DealStatusCompleting : ProviderEventComplete + DealStatusCompleting --> DealStatusCompleted : ProviderEventCleanupComplete + 
DealStatusFailing --> DealStatusErrored : ProviderEventCancelComplete + DealStatusCancelling --> DealStatusCancelled : ProviderEventCancelComplete + + note left of DealStatusFailing : The following events only record in this state.

ProviderEventClientCancelled + + + note left of DealStatusFundsNeeded : The following events only record in this state.

ProviderEventPaymentRequested + + + note left of DealStatusOngoing : The following events only record in this state.

ProviderEventPaymentReceived + + + note left of DealStatusBlocksComplete : The following events only record in this state.

ProviderEventPaymentReceived + + + note left of DealStatusFinalizing : The following events only record in this state.

ProviderEventPaymentReceived + + + note left of DealStatusCancelling : The following events only record in this state.

ProviderEventClientCancelled + diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd.png b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd.png new file mode 100644 index 00000000000..0d97b8e1bfe Binary files /dev/null and b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd.png differ diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd.svg b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd.svg new file mode 100644 index 00000000000..da711eae6f7 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/retrievalprovider.mmd.svg @@ -0,0 +1,6 @@ +ProviderEventOpenProviderEventDealAcceptedProviderEventDealAcceptedProviderEventUnsealErrorProviderEventUnsealCompleteProviderEventBlockSentProviderEventBlockSentProviderEventBlocksCompletedProviderEventPaymentRequestedProviderEventPaymentRequestedProviderEventPaymentRequestedProviderEventPaymentRequestedProviderEventSaveVoucherFailedProviderEventSaveVoucherFailedProviderEventPartialPaymentReceivedProviderEventPartialPaymentReceivedProviderEventPaymentReceivedProviderEventPaymentReceivedProviderEventPaymentReceivedProviderEventCompleteProviderEventCompleteProviderEventCleanupCompleteProviderEventCancelCompleteProviderEventCancelCompleteDealStatusNewDealStatusUnsealingOn entry runs UnsealDataDealStatusUnsealedOn entry runs UnpauseDealDealStatusFundsNeededUnsealOn entry runs TrackTransferDealStatusFailingOn entry runs CancelDealDealStatusFundsNeededDealStatusOngoingDealStatusFundsNeededLastPaymentDealStatusCompletedDealStatusErroredDealStatusBlocksCompleteDealStatusFinalizingDealStatusCompletingOn entry runs CleanupDealDealStatusCancellingOn entry runs CancelDealDealStatusCancelledThe following events are not shown cause they can trigger from any state.ProviderEventDataTransferError - transitions state to DealStatusErroredProviderEventMultiStoreError - transitions state to DealStatusErroredProviderEventClientCancelled - 
transitions state to DealStatusCancellingThe following events only record in this state.ProviderEventClientCancelledThe following events only record in this state.ProviderEventPaymentRequestedThe following events only record in this state.ProviderEventPaymentReceivedThe following events only record in this state.ProviderEventPaymentReceivedThe following events only record in this state.ProviderEventPaymentReceivedThe following events only record in this state.ProviderEventClientCancelled \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd new file mode 100644 index 00000000000..47079fe5035 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd @@ -0,0 +1,96 @@ +stateDiagram-v2 + state "StorageDealUnknown" as 0 + state "StorageDealProposalAccepted" as 3 + state "StorageDealSealing" as 5 + state "StorageDealActive" as 7 + state "StorageDealExpired" as 8 + state "StorageDealSlashed" as 9 + state "StorageDealFailing" as 11 + state "StorageDealFundsReserved" as 12 + state "StorageDealCheckForAcceptance" as 13 + state "StorageDealStartDataTransfer" as 16 + state "StorageDealTransferring" as 17 + state "StorageDealReserveClientFunds" as 21 + state "StorageDealClientFunding" as 23 + state "StorageDealError" as 26 + state "StorageDealClientTransferRestart" as 28 + state "StorageDealAwaitingPreCommit" as 29 + state "StorageDealTransferQueued" as 30 + 3 : On entry runs ValidateDealPublished + 5 : On entry runs VerifyDealActivated + 7 : On entry runs WaitForDealCompletion + 11 : On entry runs FailDeal + 12 : On entry runs ProposeDeal + 13 : On entry runs CheckForDealAcceptance + 16 : On entry runs InitiateDataTransfer + 21 : On entry runs ReserveClientFunds + 23 : On entry runs WaitForFunding + 28 : On entry runs RestartDataTransfer + 29 : On entry runs VerifyDealPreCommitted + [*] --> 0 + note right of 0 + The following events are not 
shown cause they can trigger from any state. + + ClientEventStreamCloseError - transitions state to StorageDealError + ClientEventRestart - does not transition state + end note + 0 --> 21 : ClientEventOpen + 21 --> 23 : ClientEventFundingInitiated + 21 --> 11 : ClientEventReserveFundsFailed + 23 --> 11 : ClientEventReserveFundsFailed + 21 --> 12 : ClientEventFundingComplete + 23 --> 12 : ClientEventFundingComplete + 12 --> 26 : ClientEventWriteProposalFailed + 12 --> 11 : ClientEventReadResponseFailed + 12 --> 11 : ClientEventResponseVerificationFailed + 12 --> 16 : ClientEventInitiateDataTransfer + 12 --> 11 : ClientEventUnexpectedDealState + 16 --> 11 : ClientEventDataTransferFailed + 17 --> 11 : ClientEventDataTransferFailed + 30 --> 11 : ClientEventDataTransferFailed + 28 --> 11 : ClientEventDataTransferRestartFailed + 16 --> 30 : ClientEventDataTransferQueued + 30 --> 17 : ClientEventDataTransferInitiated + 16 --> 17 : ClientEventDataTransferRestarted + 28 --> 17 : ClientEventDataTransferRestarted + 30 --> 17 : ClientEventDataTransferRestarted + 17 --> 11 : ClientEventDataTransferStalled + 30 --> 11 : ClientEventDataTransferStalled + 16 --> 11 : ClientEventDataTransferCancelled + 17 --> 11 : ClientEventDataTransferCancelled + 28 --> 11 : ClientEventDataTransferCancelled + 30 --> 11 : ClientEventDataTransferCancelled + 16 --> 13 : ClientEventDataTransferComplete + 17 --> 13 : ClientEventDataTransferComplete + 30 --> 13 : ClientEventDataTransferComplete + 13 --> 13 : ClientEventWaitForDealState + 13 --> 11 : ClientEventResponseDealDidNotMatch + 13 --> 11 : ClientEventDealRejected + 13 --> 3 : ClientEventDealAccepted + 3 --> 26 : ClientEventDealPublishFailed + 3 --> 29 : ClientEventDealPublished + 29 --> 26 : + 29 --> 5 : + 5 --> 26 : ClientEventDealActivationFailed + 5 --> 7 : ClientEventDealActivated + 29 --> 7 : ClientEventDealActivated + 7 --> 9 : ClientEventDealSlashed + 7 --> 8 : ClientEventDealExpired + 7 --> 26 : ClientEventDealCompletionFailed + 11 --> 
26 : ClientEventFailed + 17 --> 28 : ClientEventRestart + + note left of 3 : The following events only record in this state.

ClientEventFundsReleased + + + note left of 11 : The following events only record in this state.

ClientEventFundsReleased + + + note left of 17 : The following events only record in this state.

ClientEventDataTransferRestarted + + + note left of 21 : The following events only record in this state.

ClientEventFundsReserved + + 9 --> [*] + 8 --> [*] + 26 --> [*] diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd.png b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd.png new file mode 100644 index 00000000000..0927c11b3de Binary files /dev/null and b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd.png differ diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd.svg b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd.svg new file mode 100644 index 00000000000..81009f2f9c3 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageclient.mmd.svg @@ -0,0 +1,6 @@ +ClientEventOpenClientEventFundingInitiatedClientEventReserveFundsFailedClientEventReserveFundsFailedClientEventFundingCompleteClientEventFundingCompleteClientEventWriteProposalFailedClientEventReadResponseFailedClientEventResponseVerificationFailedClientEventInitiateDataTransferClientEventUnexpectedDealStateClientEventDataTransferFailedClientEventDataTransferFailedClientEventDataTransferFailedClientEventDataTransferRestartFailedClientEventDataTransferQueuedClientEventDataTransferInitiatedClientEventDataTransferRestartedClientEventDataTransferRestartedClientEventDataTransferRestartedClientEventDataTransferStalledClientEventDataTransferStalledClientEventDataTransferCancelledClientEventDataTransferCancelledClientEventDataTransferCancelledClientEventDataTransferCancelledClientEventDataTransferCompleteClientEventDataTransferCompleteClientEventDataTransferCompleteClientEventWaitForDealStateClientEventResponseDealDidNotMatchClientEventDealRejectedClientEventDealAcceptedClientEventDealPublishFailedClientEventDealPublished<invalid Value><invalid Value>ClientEventDealActivationFailedClientEventDealActivatedClientEventDealActivatedClientEventDealSlashedClientEventDealExpiredClientEventDealCompletionFailedClientEventFailedClientEventRestartStorageDealUnknownStorageDealProposalAcceptedOn entry runs 
ValidateDealPublishedStorageDealSealingOn entry runs VerifyDealActivatedStorageDealActiveOn entry runs WaitForDealCompletionStorageDealExpiredStorageDealSlashedStorageDealFailingOn entry runs FailDealStorageDealFundsReservedOn entry runs ProposeDealStorageDealCheckForAcceptanceOn entry runs CheckForDealAcceptanceStorageDealStartDataTransferOn entry runs InitiateDataTransferStorageDealTransferringStorageDealReserveClientFundsOn entry runs ReserveClientFundsStorageDealClientFundingOn entry runs WaitForFundingStorageDealErrorStorageDealClientTransferRestartOn entry runs RestartDataTransferStorageDealAwaitingPreCommitOn entry runs VerifyDealPreCommittedStorageDealTransferQueuedThe following events are not shown cause they can trigger from any state.ClientEventStreamCloseError - transitions state to StorageDealErrorClientEventRestart - does not transition stateThe following events only record in this state.ClientEventFundsReleasedThe following events only record in this state.ClientEventFundsReleasedThe following events only record in this state.ClientEventDataTransferRestartedThe following events only record in this state.ClientEventFundsReserved \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd new file mode 100644 index 00000000000..2f48bf66de5 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd @@ -0,0 +1,118 @@ +stateDiagram-v2 + state "StorageDealUnknown" as 0 + state "StorageDealStaged" as 4 + state "StorageDealSealing" as 5 + state "StorageDealFinalizing" as 6 + state "StorageDealActive" as 7 + state "StorageDealExpired" as 8 + state "StorageDealSlashed" as 9 + state "StorageDealRejecting" as 10 + state "StorageDealFailing" as 11 + state "StorageDealValidating" as 14 + state "StorageDealAcceptWait" as 15 + state "StorageDealTransferring" as 17 + state "StorageDealWaitingForData" as 18 + state 
"StorageDealVerifyData" as 19 + state "StorageDealReserveProviderFunds" as 20 + state "StorageDealProviderFunding" as 22 + state "StorageDealPublish" as 24 + state "StorageDealPublishing" as 25 + state "StorageDealError" as 26 + state "StorageDealProviderTransferAwaitRestart" as 27 + state "StorageDealAwaitingPreCommit" as 29 + 4 : On entry runs HandoffDeal + 5 : On entry runs VerifyDealActivated + 6 : On entry runs CleanupDeal + 7 : On entry runs WaitForDealCompletion + 10 : On entry runs RejectDeal + 11 : On entry runs FailDeal + 14 : On entry runs ValidateDealProposal + 15 : On entry runs DecideOnProposal + 19 : On entry runs VerifyData + 20 : On entry runs ReserveProviderFunds + 22 : On entry runs WaitForFunding + 24 : On entry runs PublishDeal + 25 : On entry runs WaitForPublish + 27 : On entry runs WaitForTransferRestart + 29 : On entry runs VerifyDealPreCommitted + [*] --> 0 + note right of 0 + The following events are not shown cause they can trigger from any state. + + ProviderEventNodeErrored - transitions state to StorageDealFailing + ProviderEventRestart - does not transition state + ProviderEventAwaitTransferRestartTimeout - just records + end note + 0 --> 14 : ProviderEventOpen + 14 --> 10 : ProviderEventDealRejected + 15 --> 10 : ProviderEventDealRejected + 19 --> 10 : ProviderEventDealRejected + 10 --> 11 : ProviderEventRejectionSent + 14 --> 15 : ProviderEventDealDeciding + 15 --> 18 : ProviderEventDataRequested + 17 --> 11 : ProviderEventDataTransferFailed + 27 --> 11 : ProviderEventDataTransferFailed + 18 --> 17 : ProviderEventDataTransferInitiated + 27 --> 17 : ProviderEventDataTransferInitiated + 18 --> 17 : ProviderEventDataTransferRestarted + 27 --> 17 : ProviderEventDataTransferRestarted + 17 --> 11 : ProviderEventDataTransferCancelled + 18 --> 11 : ProviderEventDataTransferCancelled + 27 --> 11 : ProviderEventDataTransferCancelled + 17 --> 19 : ProviderEventDataTransferCompleted + 27 --> 19 : ProviderEventDataTransferCompleted + 19 --> 11 : 
ProviderEventDataVerificationFailed + 18 --> 20 : ProviderEventVerifiedData + 19 --> 20 : ProviderEventVerifiedData + 20 --> 22 : ProviderEventFundingInitiated + 20 --> 24 : ProviderEventFunded + 22 --> 24 : ProviderEventFunded + 24 --> 25 : ProviderEventDealPublishInitiated + 25 --> 11 : ProviderEventDealPublishError + 10 --> 11 : ProviderEventSendResponseFailed + 15 --> 11 : ProviderEventSendResponseFailed + 25 --> 4 : ProviderEventDealPublished + 4 --> 11 : ProviderEventFileStoreErrored + 5 --> 11 : ProviderEventFileStoreErrored + 7 --> 11 : ProviderEventFileStoreErrored + 29 --> 11 : ProviderEventFileStoreErrored + 4 --> 11 : ProviderEventMultistoreErrored + 4 --> 11 : ProviderEventDealHandoffFailed + 4 --> 29 : ProviderEventDealHandedOff + 29 --> 11 : ProviderEventDealPrecommitFailed + 29 --> 5 : ProviderEventDealPrecommitted + 5 --> 11 : ProviderEventDealActivationFailed + 5 --> 6 : ProviderEventDealActivated + 29 --> 6 : ProviderEventDealActivated + 6 --> 7 : ProviderEventCleanupFinished + 7 --> 9 : ProviderEventDealSlashed + 7 --> 8 : ProviderEventDealExpired + 7 --> 26 : ProviderEventDealCompletionFailed + 11 --> 26 : ProviderEventFailed + 10 --> 26 : ProviderEventRestart + 14 --> 26 : ProviderEventRestart + 15 --> 26 : ProviderEventRestart + 17 --> 27 : ProviderEventRestart + 27 --> 11 : ProviderEventAwaitTransferRestartTimeout + 20 --> 11 : ProviderEventTrackFundsFailed + + note left of 4 : The following events only record in this state.

ProviderEventPieceStoreErrored + + + note left of 11 : The following events only record in this state.

ProviderEventFundsReleased + + + note left of 17 : The following events only record in this state.

ProviderEventDataTransferRestarted
ProviderEventDataTransferStalled + + + note left of 20 : The following events only record in this state.

ProviderEventFundsReserved + + + note left of 25 : The following events only record in this state.

ProviderEventFundsReleased + + + note left of 27 : The following events only record in this state.

ProviderEventDataTransferStalled + + 26 --> [*] + 9 --> [*] + 8 --> [*] diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd.png b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd.png new file mode 100644 index 00000000000..2e273df87a7 Binary files /dev/null and b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd.png differ diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd.svg b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd.svg new file mode 100644 index 00000000000..8565cf1b8ad --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/docs/storageprovider.mmd.svg @@ -0,0 +1,6 @@ +ProviderEventOpenProviderEventDealRejectedProviderEventDealRejectedProviderEventDealRejectedProviderEventRejectionSentProviderEventDealDecidingProviderEventDataRequestedProviderEventDataTransferFailedProviderEventDataTransferFailedProviderEventDataTransferInitiatedProviderEventDataTransferInitiatedProviderEventDataTransferRestartedProviderEventDataTransferRestartedProviderEventDataTransferCancelledProviderEventDataTransferCancelledProviderEventDataTransferCancelledProviderEventDataTransferCompletedProviderEventDataTransferCompletedProviderEventDataVerificationFailedProviderEventVerifiedDataProviderEventVerifiedDataProviderEventFundingInitiatedProviderEventFundedProviderEventFundedProviderEventDealPublishInitiatedProviderEventDealPublishErrorProviderEventSendResponseFailedProviderEventSendResponseFailedProviderEventDealPublishedProviderEventFileStoreErroredProviderEventFileStoreErroredProviderEventFileStoreErroredProviderEventFileStoreErroredProviderEventMultistoreErroredProviderEventDealHandoffFailedProviderEventDealHandedOffProviderEventDealPrecommitFailedProviderEventDealPrecommittedProviderEventDealActivationFailedProviderEventDealActivatedProviderEventDealActivatedProviderEventCleanupFinishedProviderEventDealSlashedProviderEventDealExpiredProviderEventDealCompletionFailedProviderEventFailedProv
iderEventRestartProviderEventRestartProviderEventRestartProviderEventRestartProviderEventAwaitTransferRestartTimeoutProviderEventTrackFundsFailedStorageDealUnknownStorageDealStagedOn entry runs HandoffDealStorageDealSealingOn entry runs VerifyDealActivatedStorageDealFinalizingOn entry runs CleanupDealStorageDealActiveOn entry runs WaitForDealCompletionStorageDealExpiredStorageDealSlashedStorageDealRejectingOn entry runs RejectDealStorageDealFailingOn entry runs FailDealStorageDealValidatingOn entry runs ValidateDealProposalStorageDealAcceptWaitOn entry runs DecideOnProposalStorageDealTransferringStorageDealWaitingForDataStorageDealVerifyDataOn entry runs VerifyDataStorageDealReserveProviderFundsOn entry runs ReserveProviderFundsStorageDealProviderFundingOn entry runs WaitForFundingStorageDealPublishOn entry runs PublishDealStorageDealPublishingOn entry runs WaitForPublishStorageDealErrorStorageDealProviderTransferAwaitRestartOn entry runs WaitForTransferRestartStorageDealAwaitingPreCommitOn entry runs VerifyDealPreCommittedThe following events are not shown cause they can trigger from any state.ProviderEventNodeErrored - transitions state to StorageDealFailingProviderEventRestart - does not transition stateProviderEventAwaitTransferRestartTimeout - just recordsThe following events only record in this state.ProviderEventPieceStoreErroredThe following events only record in this state.ProviderEventFundsReleasedThe following events only record in this state.ProviderEventDataTransferRestartedProviderEventDataTransferStalledThe following events only record in this state.ProviderEventFundsReservedThe following events only record in this state.ProviderEventFundsReleasedThe following events only record in this state.ProviderEventDataTransferStalled \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/docsgen/main.go b/extern/sxx-go-fil-markets@v1.24.0-v17/docsgen/main.go new file mode 100644 index 00000000000..219c1102882 --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/docsgen/main.go @@ -0,0 +1,95 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" + "github.com/filecoin-project/go-fil-markets/storagemarket" + storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" +) + +func storageDealStatusCmp(a, b fsm.StateKey) bool { + aDealStatus := a.(storagemarket.StorageDealStatus) + bDealStatus := b.(storagemarket.StorageDealStatus) + return aDealStatus < bDealStatus +} + +func retrievalDealStatusCmp(a, b fsm.StateKey) bool { + aDealStatus := a.(retrievalmarket.DealStatus) + bDealStatus := b.(retrievalmarket.DealStatus) + return aDealStatus < bDealStatus +} + +func updateOnChanged(name string, writeContents func(w io.Writer) error) error { + input, err := os.Open(name) + if err != nil { + return err + } + orig, err := ioutil.ReadAll(input) + if err != nil { + return err + } + err = input.Close() + if err != nil { + return err + } + buf := new(bytes.Buffer) + err = writeContents(buf) + if err != nil { + return err + } + if !bytes.Equal(orig, buf.Bytes()) { + file, err := os.Create(name) + if err != nil { + return err + } + _, err = file.Write(buf.Bytes()) + if err != nil { + return err + } + err = file.Close() + if err != nil { + return err + } + } + return nil +} + +func main() { + + err := updateOnChanged("./docs/storageclient.mmd", func(w io.Writer) error { + return fsm.GenerateUML(w, fsm.MermaidUML, storageimpl.ClientFSMParameterSpec, storagemarket.DealStates, storagemarket.ClientEvents, []fsm.StateKey{storagemarket.StorageDealUnknown}, false, storageDealStatusCmp) + }) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = updateOnChanged("./docs/storageprovider.mmd", func(w io.Writer) error { + return fsm.GenerateUML(w, fsm.MermaidUML, 
storageimpl.ProviderFSMParameterSpec, storagemarket.DealStates, storagemarket.ProviderEvents, []fsm.StateKey{storagemarket.StorageDealUnknown}, false, storageDealStatusCmp) + }) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = updateOnChanged("./docs/retrievalclient.mmd", func(w io.Writer) error { + return fsm.GenerateUML(w, fsm.MermaidUML, retrievalimpl.ClientFSMParameterSpec, retrievalmarket.DealStatuses, retrievalmarket.ClientEvents, []fsm.StateKey{retrievalmarket.DealStatusNew}, false, retrievalDealStatusCmp) + }) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = updateOnChanged("./docs/retrievalprovider.mmd", func(w io.Writer) error { + return fsm.GenerateUML(w, fsm.MermaidUML, retrievalimpl.ProviderFSMParameterSpec, retrievalmarket.DealStatuses, retrievalmarket.ProviderEvents, []fsm.StateKey{retrievalmarket.DealStatusNew}, false, retrievalDealStatusCmp) + }) + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/README.md b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/README.md new file mode 100644 index 00000000000..2557435cd21 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/README.md @@ -0,0 +1,29 @@ +# filestore + +The `filestore` module is a simple wrapper for os.File. It is used by [pieceio](../pieceio), +[retrievialmarket](../retrievalmarket), and [storagemarket](../storagemarket). + +## Installation +```bash +go get github.com/filecoin-project/go-fil-markets/filestore +``` + +## FileStore +FileStore is the primary export of this module. 
+ +### Usage +To create a new local filestore mounted on a given local directory path, use: +```go +package filestore + +func NewLocalFileStore(basedirectory OsPath) (FileStore, error) +``` + +A FileStore provides the following functions: +* [`Open`](filestore.go) +* [`Create`](filestore.go) +* [`Store`](filestore.go) +* [`Delete`](filestore.go) +* [`CreateTemp`](filestore.go) + +Please the [tests](filestore_test.go) for more information about expected behavior. \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/file.go b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/file.go new file mode 100644 index 00000000000..119ced0f178 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/file.go @@ -0,0 +1,39 @@ +package filestore + +import ( + "os" + "path" +) + +type fd struct { + *os.File + filename string + basepath string +} + +func newFile(basepath OsPath, filename Path) (File, error) { + var err error + result := fd{filename: string(filename), basepath: string(basepath)} + full := path.Join(string(basepath), string(filename)) + result.File, err = os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + return &result, nil +} + +func (f fd) Path() Path { + return Path(f.filename) +} + +func (f fd) OsPath() OsPath { + return OsPath(f.Name()) +} + +func (f fd) Size() int64 { + info, err := os.Stat(f.Name()) + if err != nil { + return -1 + } + return info.Size() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/filestore.go b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/filestore.go new file mode 100644 index 00000000000..a9c802102d3 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/filestore.go @@ -0,0 +1,83 @@ +package filestore + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +type fileStore struct { + base string +} + +// NewLocalFileStore creates a filestore mounted on a given local directory path +func 
NewLocalFileStore(baseDir OsPath) (FileStore, error) { + base, err := checkIsDir(string(baseDir)) + if err != nil { + return nil, err + } + return &fileStore{base}, nil +} + +func (fs fileStore) filename(p Path) string { + return filepath.Join(fs.base, string(p)) +} + +func (fs fileStore) Open(p Path) (File, error) { + name := fs.filename(p) + if _, err := os.Stat(name); err != nil { + return nil, fmt.Errorf("error trying to open %s: %s", name, err.Error()) + } + return newFile(OsPath(fs.base), p) +} + +func (fs fileStore) Create(p Path) (File, error) { + name := fs.filename(p) + if _, err := os.Stat(name); err == nil { + return nil, fmt.Errorf("file %s already exists", name) + } + return newFile(OsPath(fs.base), p) +} + +func (fs fileStore) Store(p Path, src File) (Path, error) { + dest, err := fs.Create(p) + if err != nil { + return Path(""), err + } + + if _, err = io.Copy(dest, src); err != nil { + dest.Close() + return Path(""), err + } + return p, dest.Close() +} + +func (fs fileStore) Delete(p Path) error { + filename := string(p) + full := path.Join(string(fs.base), string(filename)) + return os.Remove(full) +} + +func (fs fileStore) CreateTemp() (File, error) { + f, err := ioutil.TempFile(fs.base, "fstmp") + if err != nil { + return nil, err + } + filename := filepath.Base(f.Name()) + return &fd{File: f, basepath: fs.base, filename: filename}, nil +} + +func checkIsDir(baseDir string) (string, error) { + base := filepath.Clean(string(baseDir)) + info, err := os.Stat(base) + if err != nil { + return "", fmt.Errorf("error getting %s info: %s", base, err.Error()) + } + if !info.IsDir() { + return "", fmt.Errorf("%s is not a directory", base) + } + return base, nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/filestore_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/filestore_test.go new file mode 100644 index 00000000000..0daaea58042 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/filestore_test.go @@ -0,0 
+1,178 @@ +package filestore + +import ( + "crypto/rand" + "fmt" + "log" + "os" + "path" + "testing" + + "github.com/stretchr/testify/require" +) + +func randBytes(n int) []byte { + arr := make([]byte, n) + _, err := rand.Read(arr) + if err != nil { + log.Fatal(err) + } + return arr +} + +const baseDir = "_test/a/b/c/d" +const existingFile = "existing.txt" + +func init() { + err := os.MkdirAll(baseDir, 0755) + if err != nil { + log.Print(err.Error()) + return + } + filename := path.Join(baseDir, existingFile) + file, err := os.Create(filename) + if err != nil { + log.Print(err.Error()) + return + } + defer file.Close() + _, err = file.Write(randBytes(64)) + if err != nil { + log.Print(err.Error()) + return + } +} + +func Test_SizeFails(t *testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + name := Path("noFile.txt") + file, err := store.Create(name) + require.NoError(t, err) + err = store.Delete(file.Path()) + require.NoError(t, err) + require.Equal(t, int64(-1), file.Size()) +} + +func Test_OpenFileFails(t *testing.T) { + base := "_test/a/b/c/d/e" + err := os.MkdirAll(base, 0755) + require.NoError(t, err) + store, err := NewLocalFileStore(OsPath(base)) + require.NoError(t, err) + err = os.Remove(base) + require.NoError(t, err) + _, err = store.Open(existingFile) + require.Error(t, err) +} + +func Test_RemoveSeparators(t *testing.T) { + first, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + second, err := NewLocalFileStore(OsPath(fmt.Sprintf("%s%c%c", baseDir, os.PathSeparator, os.PathSeparator))) + require.NoError(t, err) + f1, err := first.Open(existingFile) + require.NoError(t, err) + f2, err := second.Open(existingFile) + require.NoError(t, err) + require.Equal(t, f1.Path(), f2.Path()) +} + +func Test_BaseDirIsFileFails(t *testing.T) { + base := fmt.Sprintf("%s%c%s", baseDir, os.PathSeparator, existingFile) + _, err := NewLocalFileStore(OsPath(base)) + require.Error(t, err) +} + +func Test_CreateExistingFileFails(t 
*testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + _, err = store.Create(Path(existingFile)) + require.Error(t, err) +} + +func Test_StoreFails(t *testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + file, err := store.Open(Path(existingFile)) + require.NoError(t, err) + _, err = store.Store(Path(existingFile), file) + require.Error(t, err) +} + +func Test_OpenFails(t *testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + name := Path("newFile.txt") + _, err = store.Open(name) + require.Error(t, err) +} + +func Test_InvalidBaseDirectory(t *testing.T) { + _, err := NewLocalFileStore("NoSuchDirectory") + require.Error(t, err) +} + +func Test_CreateFile(t *testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + name := Path("newFile.txt") + f, err := store.Create(name) + require.NoError(t, err) + defer func() { + err := store.Delete(f.Path()) + require.NoError(t, err) + }() + bytesToWrite := 32 + written, err := f.Write(randBytes(bytesToWrite)) + require.NoError(t, err) + require.Equal(t, bytesToWrite, written) + require.Equal(t, int64(bytesToWrite), f.Size()) +} + +func Test_CreateTempFile(t *testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + file, err := store.CreateTemp() + require.NoError(t, err) + defer func() { + err := store.Delete(file.Path()) + require.NoError(t, err) + }() + bytesToWrite := 32 + written, err := file.Write(randBytes(bytesToWrite)) + require.NoError(t, err) + require.Equal(t, bytesToWrite, written) + require.Equal(t, int64(bytesToWrite), file.Size()) +} + +func Test_OpenAndReadFile(t *testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + file, err := store.Open(Path(existingFile)) + require.NoError(t, err) + size := file.Size() + require.NotEqual(t, -1, size) + pos := int64(size / 2) + offset, err := file.Seek(pos, 0) + require.NoError(t, err) + 
require.Equal(t, pos, offset) + buffer := make([]byte, size/2) + read, err := file.Read(buffer) + require.NoError(t, err) + require.Equal(t, int(size/2), read) + err = file.Close() + require.NoError(t, err) +} + +func Test_CopyFile(t *testing.T) { + store, err := NewLocalFileStore(baseDir) + require.NoError(t, err) + file, err := store.Open(Path(existingFile)) + require.NoError(t, err) + newFile := Path("newFile.txt") + newPath, err := store.Store(newFile, file) + require.NoError(t, err) + err = store.Delete(newPath) + require.NoError(t, err) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/mocks/File.go b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/mocks/File.go new file mode 100644 index 00000000000..dfeac9eeb33 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/mocks/File.go @@ -0,0 +1,133 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + filestore "github.com/filecoin-project/go-fil-markets/filestore" +) + +// File is an autogenerated mock type for the File type +type File struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *File) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Path provides a mock function with given fields: +func (_m *File) Path() filestore.Path { + ret := _m.Called() + + var r0 filestore.Path + if rf, ok := ret.Get(0).(func() filestore.Path); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(filestore.Path) + } + + return r0 +} + +// OsPath provides a mock function with given fields: +func (_m *File) OsPath() filestore.OsPath { + ret := _m.Called() + + var r0 filestore.OsPath + if rf, ok := ret.Get(0).(func() filestore.OsPath); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(filestore.OsPath) + } + + return r0 +} + +// Read provides a mock function with given fields: p +func 
(_m *File) Read(p []byte) (int, error) { + ret := _m.Called(p) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Seek provides a mock function with given fields: offset, whence +func (_m *File) Seek(offset int64, whence int) (int64, error) { + ret := _m.Called(offset, whence) + + var r0 int64 + if rf, ok := ret.Get(0).(func(int64, int) int64); ok { + r0 = rf(offset, whence) + } else { + r0 = ret.Get(0).(int64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64, int) error); ok { + r1 = rf(offset, whence) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Size provides a mock function with given fields: +func (_m *File) Size() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// Write provides a mock function with given fields: p +func (_m *File) Write(p []byte) (int, error) { + ret := _m.Called(p) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/mocks/FileStore.go b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/mocks/FileStore.go new file mode 100644 index 00000000000..de3d668041e --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/mocks/FileStore.go @@ -0,0 +1,118 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + filestore "github.com/filecoin-project/go-fil-markets/filestore" +) + +// FileStore is an autogenerated mock type for the FileStore type +type FileStore struct { + mock.Mock +} + +// Create provides a mock function with given fields: p +func (_m *FileStore) Create(p filestore.Path) (filestore.File, error) { + ret := _m.Called(p) + + var r0 filestore.File + if rf, ok := ret.Get(0).(func(filestore.Path) filestore.File); ok { + r0 = rf(p) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(filestore.File) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(filestore.Path) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateTemp provides a mock function with given fields: +func (_m *FileStore) CreateTemp() (filestore.File, error) { + ret := _m.Called() + + var r0 filestore.File + if rf, ok := ret.Get(0).(func() filestore.File); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(filestore.File) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: p +func (_m *FileStore) Delete(p filestore.Path) error { + ret := _m.Called(p) + + var r0 error + if rf, ok := ret.Get(0).(func(filestore.Path) error); ok { + r0 = rf(p) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Open provides a mock function with given fields: p +func (_m *FileStore) Open(p filestore.Path) (filestore.File, error) { + ret := _m.Called(p) + + var r0 filestore.File + if rf, ok := ret.Get(0).(func(filestore.Path) filestore.File); ok { + r0 = rf(p) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(filestore.File) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(filestore.Path) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Store provides a mock function with 
given fields: p, f +func (_m *FileStore) Store(p filestore.Path, f filestore.File) (filestore.Path, error) { + ret := _m.Called(p, f) + + var r0 filestore.Path + if rf, ok := ret.Get(0).(func(filestore.Path, filestore.File) filestore.Path); ok { + r0 = rf(p, f) + } else { + r0 = ret.Get(0).(filestore.Path) + } + + var r1 error + if rf, ok := ret.Get(1).(func(filestore.Path, filestore.File) error); ok { + r1 = rf(p, f) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/types.go new file mode 100644 index 00000000000..d3e8403195e --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/filestore/types.go @@ -0,0 +1,38 @@ +package filestore + +import ( + "io" +) + +// Path represents an abstract path to a file +type Path string + +// OsPath represents a path that can be located on +// the operating system with standard os.File operations +type OsPath string + +// File is a wrapper around an os file +type File interface { + Path() Path + OsPath() OsPath + Size() int64 + + io.Closer + io.Reader + io.Writer + io.Seeker +} + +// FileStore is an abstract filestore, used for storing temporary file data +// when handing off a deal to the Storage Mining module. Files are created by +// the storage market module, their path is given to the storage mining module +// when AddPiece is called. 
The Storage Mining module then reads from them +// from the FileStore, and deletes them once they have been sealed in a sector +type FileStore interface { + Open(p Path) (File, error) + Create(p Path) (File, error) + Store(p Path, f File) (Path, error) + Delete(p Path) error + + CreateTemp() (File, error) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/go.mod b/extern/sxx-go-fil-markets@v1.24.0-v17/go.mod new file mode 100644 index 00000000000..d82bd43ce71 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/go.mod @@ -0,0 +1,151 @@ +module github.com/filecoin-project/go-fil-markets + +go 1.17 + +require ( + github.com/filecoin-project/dagstore v0.5.2 + github.com/filecoin-project/go-address v0.0.6 + github.com/filecoin-project/go-cbor-util v0.0.1 + github.com/filecoin-project/go-commp-utils v0.1.3 + github.com/filecoin-project/go-data-transfer v1.15.2 + github.com/filecoin-project/go-ds-versioning v0.1.1 + github.com/filecoin-project/go-fil-commcid v0.1.0 + github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 + github.com/filecoin-project/go-padreader v0.0.1 + github.com/filecoin-project/go-state-types v0.1.12 + github.com/filecoin-project/go-statemachine v1.0.2-0.20220322104818-27f8fbb86dfd + github.com/filecoin-project/go-statestore v0.2.0 + github.com/filecoin-project/index-provider v0.8.1 + github.com/filecoin-project/specs-actors v0.9.13 + github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 + github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e + github.com/hashicorp/go-multierror v1.1.1 + github.com/ipfs/go-block-format v0.0.3 + github.com/ipfs/go-blockservice v0.3.0 + github.com/ipfs/go-cid v0.2.0 + github.com/ipfs/go-cidutil v0.1.0 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-filestore v1.2.0 + github.com/ipfs/go-graphsync v0.13.1 + github.com/ipfs/go-ipfs-blockstore v1.2.0 + github.com/ipfs/go-ipfs-blocksutil v0.0.1 + github.com/ipfs/go-ipfs-chunker v0.0.5 + github.com/ipfs/go-ipfs-ds-help 
v1.1.0 + github.com/ipfs/go-ipfs-exchange-offline v0.2.0 + github.com/ipfs/go-ipfs-files v0.0.9 + github.com/ipfs/go-ipld-cbor v0.0.6 + github.com/ipfs/go-ipld-format v0.4.0 + github.com/ipfs/go-log/v2 v2.5.1 + github.com/ipfs/go-merkledag v0.6.0 + github.com/ipfs/go-unixfs v0.3.1 + github.com/ipld/go-car v0.4.0 + github.com/ipld/go-car/v2 v2.4.1 + github.com/ipld/go-ipld-prime v0.17.0 + github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c + github.com/jpillora/backoff v1.0.0 + github.com/libp2p/go-libp2p v0.21.0 + github.com/libp2p/go-libp2p-core v0.19.1 + github.com/multiformats/go-multiaddr v0.6.0 + github.com/multiformats/go-multibase v0.1.1 + github.com/multiformats/go-multicodec v0.5.0 + github.com/multiformats/go-multihash v0.2.0 + github.com/multiformats/go-varint v0.0.6 + github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 + github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e + github.com/stretchr/testify v1.8.0 + github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 + github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799 + golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 + golang.org/x/net v0.0.0-20220630215102-69896b714898 + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f +) + +require ( + github.com/Stebalien/go-bitfield v0.0.1 // indirect + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/bep/debounce v1.2.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f // indirect + github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect + 
github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 // indirect + github.com/filecoin-project/go-bitfield v0.2.4 // indirect + github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect + github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect + github.com/filecoin-project/storetheindex v0.4.17 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/huin/goupnp v1.0.3 // indirect + github.com/ipfs/bbloom v0.0.4 // indirect + github.com/ipfs/go-bitfield v1.0.0 // indirect + github.com/ipfs/go-ipfs-exchange-interface v0.1.0 // indirect + github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect + github.com/ipfs/go-ipfs-pq v0.0.2 // indirect + github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-legacy v0.1.0 // indirect + github.com/ipfs/go-log v1.0.5 // indirect + github.com/ipfs/go-metrics-interface v0.0.1 // indirect + github.com/ipfs/go-peertaskqueue v0.7.1 // indirect + github.com/ipfs/go-unixfsnode v1.4.0 // indirect + github.com/ipfs/go-verifcid v0.0.1 // indirect + github.com/ipld/go-codec-dagpb v1.3.1 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/klauspost/cpuid/v2 v2.0.14 // indirect + github.com/koron/go-ssdp v0.0.3 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-eventbus v0.2.1 // indirect + github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-peerstore v0.7.1 // indirect + github.com/libp2p/go-msgio v0.2.0 // indirect + github.com/libp2p/go-nat v0.1.0 // indirect + github.com/libp2p/go-netroute v0.2.0 // indirect + 
github.com/libp2p/go-openssl v0.0.7 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/miekg/dns v1.1.50 // indirect + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.0.4 // indirect + github.com/multiformats/go-base36 v0.1.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multistream v0.3.3 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect + github.com/urfave/cli/v2 v2.8.1 // indirect + github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect + github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect + go.opentelemetry.io/otel/sdk v1.2.0 // indirect + go.opentelemetry.io/otel/trace v1.7.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + golang.org/x/tools v0.1.11 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.1.7 // indirect +) + 
+replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/go.sum b/extern/sxx-go-fil-markets@v1.24.0-v17/go.sum new file mode 100644 index 00000000000..2e6fd4edea5 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/go.sum @@ -0,0 +1,2223 @@ +bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod 
h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod 
h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go 
v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/akrylysov/pogreb v0.10.1/go.mod h1:pNs6QmpQ1UlTJKDezuRWmaqkgUE2TuU0YTWyqJZ7+lI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd 
v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= +github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb 
v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/ceramicnetwork/go-dag-jose v0.1.0/go.mod h1:qYA1nYt0X8u4XoMAVoOV3upUVKtrxy/I670Dg5F0wjI= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= 
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 
h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/dgraph-io/badger 
v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elgris/jsondiff 
v0.0.0-20160530203242-765b5c24c302/go.mod h1:qBlWZqWeVx9BjvqBsnC/8RUlAYpIFmPvgROcw0n1scE= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/filecoin-project/dagstore v0.5.2 h1:Nd6oXdnolbbVhpMpkYT5PJHOjQp4OBSntHpMV5pxj3c= +github.com/filecoin-project/dagstore v0.5.2/go.mod h1:mdqKzYrRBHf1pRMthYfMv3n37oOw0Tkx7+TxPt240M0= +github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= +github.com/filecoin-project/go-address v0.0.6/go.mod 
h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 h1:XM81BJ4/6h3FV0WfFjh74cIDIgqMbJsMBLM0fIuLUUk= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE= +github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= +github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= +github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= +github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837/go.mod h1:e2YBjSblNVoBckkbv3PPqsq71q98oFkFqL7s1etViGo= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= +github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-dagaggregator-unixfs v0.2.0/go.mod h1:WTuJWgBQY0omnQqa8kRPT9O0Uj5wQOgslVMUuTeHdJ8= +github.com/filecoin-project/go-data-transfer v1.15.1/go.mod h1:dXsUoDjR9tKN7aV6R0BBDNNBPzbNvrrNuWt9MUn3yYc= 
+github.com/filecoin-project/go-data-transfer v1.15.2 h1:PzqsFr2Q/onMGKrGh7TtRT0dKsJcVJrioJJnjnKmxlk= +github.com/filecoin-project/go-data-transfer v1.15.2/go.mod h1:qXOJ3IF5dEJQHykXXTwcaRxu17bXAxr+LglXzkL6bZQ= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= +github.com/filecoin-project/go-ds-versioning v0.1.1 h1:JiyBqaQlwC+UM0WhcBtVEeT3XrX59mQhT8U3p7nu86o= +github.com/filecoin-project/go-ds-versioning v0.1.1/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= +github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= +github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= +github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= +github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= +github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= +github.com/filecoin-project/go-indexer-core v0.2.16/go.mod h1:5kCKyhtT9k1vephr9l9SFGX8B/HowXIvOhGCkmbxwbY= +github.com/filecoin-project/go-legs v0.4.4 h1:mpMmAOOnamaz0CV9rgeKhEWA8j9kMC+f+UGCGrxKaZo= +github.com/filecoin-project/go-legs v0.4.4/go.mod h1:JQ3hA6xpJdbR8euZ2rO0jkxaMxeidXf0LDnVuqPAe9s= +github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= +github.com/filecoin-project/go-padreader 
v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= +github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= +github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.8/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= +github.com/filecoin-project/go-state-types v0.1.12 h1:KsC820XAwhhlcS7Fu6Yq7Bim53NbVe+4rWKS+81h+LM= +github.com/filecoin-project/go-state-types v0.1.12/go.mod h1:n/kujdC9JphvYTrmaD1+vJpvDPy/DwzckoMzP0nBKWI= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-statemachine v1.0.2-0.20220322104818-27f8fbb86dfd h1:Ykxbz+LvSCUIl2zFaaPGmF8KHXTJu9T/PymgHr7IHjs= +github.com/filecoin-project/go-statemachine v1.0.2-0.20220322104818-27f8fbb86dfd/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= +github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/index-provider v0.8.1 h1:ggoBWvMSWR91HZQCWfv8SZjoTGNyJBwNMLuN9bJZrbU= +github.com/filecoin-project/index-provider v0.8.1/go.mod 
h1:c/Ym5HtWPp9NQgNc9dgSBMpSNsZ/DE9FEi9qVubl5RM= +github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4= +github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/storetheindex v0.4.17 h1:w0dVc954TGPukoVbidlYvn9Xt+wVhk5vBvrqeJiRo8I= +github.com/filecoin-project/storetheindex v0.4.17/go.mod h1:y2dL8C5D3PXi183hdxgGtM8vVYOZ1lg515tpl/D3tN8= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1/go.mod 
h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= +github.com/gammazero/keymutex v0.0.2/go.mod h1:qtzWCCLMisQUmVa4dvqHVgwfh4BP2YB7JxNDGXnsKrs= +github.com/gammazero/radixtree v0.2.5/go.mod h1:VPqqCDZ3YZZxAzUUsIF/ytFBigVWV7JIV1Stld8hri0= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt 
v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock 
v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod 
h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod 
h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod 
h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= +github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= +github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitfield v1.0.0 h1:y/XHm2GEmD9wKngheWNNCNL0pzrWXZwCdQGv1ikXknQ= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= +github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= 
+github.com/ipfs/go-bitswap v0.6.0 h1:f2rc6GZtoSFhEIzQmddgGiel9xntj02Dg0ZNf2hSC+w= +github.com/ipfs/go-bitswap v0.6.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= +github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= +github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.7/go.mod h1:GmS+BAt4hrwBKkzE11AFDQUrnvqjwFatGS2MY7wOjEM= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= +github.com/ipfs/go-blockservice v0.3.0 h1:cDgcZ+0P0Ih3sl8+qjFr2sVaMdysg/YZpLj5WJ8kiiw= +github.com/ipfs/go-blockservice v0.3.0/go.mod h1:P5ppi8IHDC7O+pA0AlGTF09jruB2h+oP3wVVaZl8sfk= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7/go.mod 
h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= +github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= +github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= +github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= +github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-delegated-routing v0.2.2/go.mod h1:T8wrRhlXBHLPUR3bZQgArHPfdi7nBfOsZ1m5fr9tAp4= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod 
h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= +github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-flatfs v0.5.1/go.mod h1:RWTV7oZD/yZYBKdbVIFXTX2fdY2Tbvl94NsWqmoyAX4= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= +github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE= +github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo= +github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= +github.com/ipfs/go-filestore v1.2.0 h1:O2wg7wdibwxkEDcl7xkuQsPvJFRBVgVSsOJ/GP6z3yU= +github.com/ipfs/go-filestore v1.2.0/go.mod h1:HLJrCxRXquTeEEpde4lTLMaE/MYJZD7WHLkp9z6+FF8= +github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= +github.com/ipfs/go-graphsync 
v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= +github.com/ipfs/go-graphsync v0.13.1 h1:lWiP/WLycoPUYyj3IDEi1GJNP30kFuYOvimcfeuZyQs= +github.com/ipfs/go-graphsync v0.13.1/go.mod h1:y8e8G6CmZeL9Srvx1l15CtGiRdf3h5JdQuqPz/iYL0A= +github.com/ipfs/go-ipfs v0.12.1/go.mod h1:Sbei4ScHevs2v47nUgONQMtHlUfaJjjTNDbhUU1OzOM= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v0.1.6/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.0.4-0.20210205083733-fb07d7bc5aec/go.mod h1:feuklK+m9POeWJzYQO7l05yNEgUiX5oELBNA8/Be33E= +github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w= +github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= +github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-cmds v0.6.0/go.mod h1:ZgYiWVnCk43ChwoH8hAmI1IRbuVtq3GSTHwtRB/Kqhk= 
+github.com/ipfs/go-ipfs-config v0.18.0/go.mod h1:wz2lKzOjgJeYJa6zx8W9VT7mz+iSd0laBMqS/9wmX6A= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= +github.com/ipfs/go-ipfs-exchange-offline v0.2.0 h1:2PF4o4A7W656rC0RxuhUace997FTcDTcIQ6NoEtyjAI= +github.com/ipfs/go-ipfs-exchange-offline v0.2.0/go.mod h1:HjwBeW0dvZvfOMwDP0TSKXIHf2s+ksdP4E3MLDRtLKY= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-files v0.0.9 h1:OFyOfmuVDu9c5YtjSDORmwXzE6fmZikzZpzsnNkgFEg= +github.com/ipfs/go-ipfs-files v0.0.9/go.mod 
h1:aFv2uQ/qxWpL/6lidWvnSQmaVqCrf0TBGoUr+C1Fo84= +github.com/ipfs/go-ipfs-keystore v0.0.2/go.mod h1:H49tRmibOEs7gLMgbOsjC4dqh1u5e0R/SWuc2ScfgSo= +github.com/ipfs/go-ipfs-pinner v0.2.1/go.mod h1:l1AtLL5bovb7opnG77sh4Y10waINz3Y1ni6CvTzx7oo= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-provider v0.7.1/go.mod h1:QwdDYRYnC5sYGLlOwVDY/0ZB6T3zcMtu+5+GdGeUuw8= +github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= 
+github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= +github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI= +github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA= +github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.5/go.mod 
h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.4.0/go.mod h1:XshXBkhyeS63YNGisLL1uDSfuTyrQIxVUOg3ojR5MOE= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= +github.com/ipfs/go-merkledag v0.6.0 h1:oV5WT2321tS4YQVOPgIrWHvJ0lJobRTerU+i9nmUCuA= +github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= +github.com/ipfs/go-mfs v0.2.1/go.mod h1:Woj80iuw4ajDnIP6+seRaoHpPsc9hmL0pk/nDNDWP88= +github.com/ipfs/go-namesys v0.4.0/go.mod h1:jpJwzodyP8DZdWN6DShRjVZw6gaqMr4nQLBSxU5cR6E= +github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= 
+github.com/ipfs/go-path v0.0.9/go.mod h1:VpDkSBKQ9EFQOUgi54Tq/O/tGi8n1RfYNks13M3DEs8= +github.com/ipfs/go-path v0.1.1/go.mod h1:vC8q4AKOtrjJz2NnllIrmr2ZbGlF5fW2OKKyhV9ggb0= +github.com/ipfs/go-path v0.2.1/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I= +github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-pinning-service-http-client v0.1.0/go.mod h1:tcCKmlkWWH9JUUkKs8CrOZBanacNc1dmKLfjlyXAMu4= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-unixfs v0.2.6/go.mod h1:GTTzQvaZsTZARdNkkdjDKFFnBhmO3e5mIM1PkH/x4p0= +github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8= +github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= +github.com/ipfs/go-unixfsnode v1.1.2/go.mod h1:5dcE2x03pyjHk4JjamXmunTMzz+VUtqvPwZjIEkfV6s= +github.com/ipfs/go-unixfsnode v1.1.3/go.mod h1:ZZxUM5wXBC+G0Co9FjrYTOm+UlhZTjxLfRYdWY9veZ4= +github.com/ipfs/go-unixfsnode v1.4.0 h1:9BUxHBXrbNi8mWHc6j+5C580WJqtVw9uoeEKn4tMhwA= +github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod 
h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipfs/interface-go-ipfs-core v0.4.0/go.mod h1:UJBcU6iNennuI05amq3FQ7g0JHUkibHFAfhfUIy927o= +github.com/ipfs/interface-go-ipfs-core v0.5.2/go.mod h1:lNBJrdXHtWS46evMPBdWtDQMDsrKcGbxCOGoKLkztOE= +github.com/ipfs/tar-utils v0.0.2/go.mod h1:4qlnRWgTVljIMhSG2SqRYn66NT+3wrv/kZt9V+eqxDM= +github.com/ipld/edelweiss v0.1.2/go.mod h1:14NnBVHgrPO8cqDnKg7vc69LGI0aCAcax6mj21+99ec= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= +github.com/ipld/go-car v0.3.2/go.mod h1:WEjynkVt04dr0GwJhry0KlaTeSDEiEYyMPOxDBQ17KE= +github.com/ipld/go-car v0.4.0 h1:U6W7F1aKF/OJMHovnOVdst2cpQE5GhmHibQkAixgNcQ= +github.com/ipld/go-car v0.4.0/go.mod h1:Uslcn4O9cBKK9wqHm/cLTFacg6RAPv6LZx2mxd2Ypl4= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.4.1 h1:9S+FYbQzQJ/XzsdiOV13W5Iu/i+gUnr6csbSD9laFEg= +github.com/ipld/go-car/v2 v2.4.1/go.mod h1:zjpRf0Jew9gHqSvjsKVyoq9OY9SWoEKdYCQUKVaaPT0= +github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= +github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-codec-dagpb v1.3.1 h1:yVNlWRQexCa54ln3MSIiUN++ItH7pdhBFhh0hSgZu1w= +github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= +github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= +github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= +github.com/ipld/go-ipld-prime v0.14.0/go.mod 
h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.14.1/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.2/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.4-0.20211217152141-008fd70fc96f/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= +github.com/ipld/go-ipld-prime v0.17.0 h1:+U2peiA3aQsE7mrXjD2nYZaZrCcakoz2Wge8K42Ld8g= +github.com/ipld/go-ipld-prime v0.17.0/go.mod h1:aYcKm5TIvGfY8P3QBKz/2gKcLxzJ1zDaD+o0bOowhgs= +github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-storethehash v0.1.7/go.mod h1:O2CgbSwJfXCrYsjA1g1M7zJmVzzg71BM00ds6pyMLAQ= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c 
h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go 
v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod 
h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= +github.com/klauspost/cpuid/v2 v2.0.14 h1:QRqdp6bb9M9S5yyKeYteXKuoKE4p0tGlra81fKOpWH8= +github.com/klauspost/cpuid/v2 v2.0.14/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= +github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= +github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= +github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= +github.com/libp2p/go-doh-resolver v0.3.1/go.mod h1:y5go1ZppAq9N2eppbX0xON01CyPBeUg2yS6BTssssog= +github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= +github.com/libp2p/go-eventbus v0.2.1/go.mod 
h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= +github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= +github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= +github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= +github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= +github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= +github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= +github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.18.0/go.mod h1:+veaZ9z1SZQhmc5PW78jvnnxZ89Mgvmh4cggO11ETmw= +github.com/libp2p/go-libp2p v0.19.4/go.mod h1:MIt8y481VDhUe4ErWi1a4bvt/CjjFfOq6kZTothWIXY= +github.com/libp2p/go-libp2p v0.20.1/go.mod h1:XgJHsOhEBVBXp/2Sj9bm/yEyD94uunAaP6oaegdcKks= +github.com/libp2p/go-libp2p v0.21.0 h1:s9yYScuIFY33FOOzwTXbc8QqbvsRyKIWFf0FCSJKrfM= 
+github.com/libp2p/go-libp2p v0.21.0/go.mod h1:zvcA6/C4mr5/XQarRICh+L1SN9dAHHlSWDq4x5VYxg4= +github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= +github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= +github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= +github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= +github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= +github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= +github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod 
h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= +github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= +github.com/libp2p/go-libp2p-circuit v0.6.0/go.mod h1:kB8hY+zCpMeScyvFrKrGicRdid6vNXbunKE4rXATZ0M= +github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= 
+github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= +github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= +github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.14.0/go.mod h1:tLasfcVdTXnixsLB0QYaT1syJOhsbrhG7q6pGrHtBg8= +github.com/libp2p/go-libp2p-core v0.15.1/go.mod h1:agSaboYM4hzB1cWekgVReqV5M4g5M+2eNNejV+1EEhs= +github.com/libp2p/go-libp2p-core v0.16.1/go.mod h1:O3i/7y+LqUb0N+qhzXjBjjpchgptWAVMG1Voegk7b4c= +github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= +github.com/libp2p/go-libp2p-core v0.19.1 
h1:zaZQQCeCrFMtxFa1wHy6AhsVynyNmZAvwgWqSSPT3WE= +github.com/libp2p/go-libp2p-core v0.19.1/go.mod h1:2uLhmmqDiFY+dw+70KkBLeKvvsJHGWUINRDdeV1ip7k= +github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= +github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= +github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= +github.com/libp2p/go-libp2p-discovery v0.7.0 h1:6Iu3NyningTb/BmUnEhcTwzwbs4zcywwbfTulM9LHuc= +github.com/libp2p/go-libp2p-discovery v0.7.0/go.mod h1:zPug0Rxib1aQG9iIdwOpRpBf18cAfZgzicO826UQP4I= +github.com/libp2p/go-libp2p-gostream v0.3.0/go.mod h1:pLBQu8db7vBMNINGsAwLL/ZCE8wng5V1FThoaE5rNjc= +github.com/libp2p/go-libp2p-gostream v0.3.1 h1:XlwohsPn6uopGluEWs1Csv1QCEjrTXf2ZQagzZ5paAg= +github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI= +github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= +github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= +github.com/libp2p/go-libp2p-http v0.2.1/go.mod h1:9KdioZ7XqNH0eZkZG9bulZLzHv11A7/12fT97agqWhg= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod 
h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= +github.com/libp2p/go-libp2p-kad-dht v0.15.0/go.mod h1:rZtPxYu1TnHHz6n1RggdGrxUX/tA1C2/Wiw3ZMUDrU0= +github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= +github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= +github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= +github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= +github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= +github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= +github.com/libp2p/go-libp2p-mplex v0.5.0/go.mod h1:eLImPJLkj3iG5t5lq68w3Vm5NAQ5BcKwrrb2VmOYb3M= +github.com/libp2p/go-libp2p-mplex v0.6.0/go.mod h1:i3usuPrBbh9FD2fLZjGpotyNkwr42KStYZQY7BeTiu4= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod 
h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= +github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= +github.com/libp2p/go-libp2p-noise v0.4.0/go.mod h1:BzzY5pyzCYSyJbQy9oD8z5oP2idsafjt4/X42h9DjZU= +github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= +github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= 
+github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= +github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= +github.com/libp2p/go-libp2p-peerstore v0.7.0/go.mod h1:cdUWTHro83vpg6unCpGUr8qJoX3e93Vy8o97u5ppIM0= +github.com/libp2p/go-libp2p-peerstore v0.7.1 h1:7FpALlqR+3+oOBXdzm3AVt0vjMYLW1b7jM03E4iEHlw= +github.com/libp2p/go-libp2p-peerstore v0.7.1/go.mod h1:cdUWTHro83vpg6unCpGUr8qJoX3e93Vy8o97u5ppIM0= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= +github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= +github.com/libp2p/go-libp2p-pubsub v0.7.0 h1:Fd9198JVc3pCsKuzd37TclzM0QcHA+uDyoiG2pvT7s4= +github.com/libp2p/go-libp2p-pubsub v0.7.0/go.mod h1:EuyBJFtF8qF67IEA98biwK8Xnw5MNJpJ/Z+8iWCMFwc= +github.com/libp2p/go-libp2p-pubsub-router v0.5.0/go.mod h1:TRJKskSem3C0aSb3CmRgPwq6IleVFzds6hS09fmZbGM= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod 
h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= +github.com/libp2p/go-libp2p-quic-transport v0.16.1/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= +github.com/libp2p/go-libp2p-quic-transport v0.17.0/go.mod h1:x4pw61P3/GRCcSLypcQJE/Q2+E9f4X+5aRcZLXf20LM= +github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= +github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= +github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= +github.com/libp2p/go-libp2p-resource-manager v0.1.5/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= +github.com/libp2p/go-libp2p-resource-manager v0.2.1/go.mod h1:K+eCkiapf+ey/LADO4TaMpMTP9/Qde/uLlrnRqV4PLQ= +github.com/libp2p/go-libp2p-resource-manager v0.3.0/go.mod h1:K+eCkiapf+ey/LADO4TaMpMTP9/Qde/uLlrnRqV4PLQ= +github.com/libp2p/go-libp2p-resource-manager v0.5.1 h1:jm0mdqn7yfh7wbUzlj948BYZX0KZ3RW7OqerkGQ5rYY= +github.com/libp2p/go-libp2p-resource-manager v0.5.1/go.mod h1:CggtV6EZb+Y0dGh41q5ezO4udcVKyhcEFpydHD8EMe0= +github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= +github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= +github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= 
+github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= +github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= +github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= +github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= +github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= +github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= +github.com/libp2p/go-libp2p-swarm v0.10.2/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs= +github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod 
h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.7.0/go.mod h1:OLbdn9DbgdMwv00v+tlp1l3oe2Cl+FAjoWIA2pa0X6E= +github.com/libp2p/go-libp2p-testing v0.8.0/go.mod h1:gRdsNxQSxAZowTgcLY7CC33xPmleZzoBpqSYbWenqPc= +github.com/libp2p/go-libp2p-testing v0.9.0/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= +github.com/libp2p/go-libp2p-testing v0.9.2/go.mod h1:Td7kbdkWqYTJYQGTwzlgXwaqldraIanyjuRiAbK/XQU= +github.com/libp2p/go-libp2p-testing v0.11.0 h1:+R7FRl/U3Y00neyBSM2qgDzqz3HkWH24U9nMlascHL4= +github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= +github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.4.1/go.mod h1:EKCixHEysLNDlLUoKxv+3f/Lp90O2EXNjTr0UQDnrIw= +github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= +github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= +github.com/libp2p/go-libp2p-transport-upgrader 
v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= +github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= +github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= +github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= +github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= +github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= +github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= +github.com/libp2p/go-libp2p-yamux 
v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= +github.com/libp2p/go-libp2p-yamux v0.5.3/go.mod h1:Vy3TMonBAfTMXHWopsMc8iX/XGRYrRlpUaMzaeuHV/s= +github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8= +github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= +github.com/libp2p/go-libp2p-yamux v0.8.2/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= +github.com/libp2p/go-libp2p-yamux v0.9.1/go.mod h1:wRc6wvyxQINFcKe7daL4BeQ02Iyp+wxyC8WCNfngBrA= +github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex 
v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.4.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= +github.com/libp2p/go-mplex v0.6.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= +github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= +github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= +github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= +github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= +github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= +github.com/libp2p/go-openssl v0.0.2/go.mod 
h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= +github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= +github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= +github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= +github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-socket-activation v0.1.0/go.mod h1:gzda2dNkMG5Ti2OfWNNwW0FDIbj0g/aJJU320FcLfhk= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer 
v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= +github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= +github.com/libp2p/go-stream-muxer-multistream v0.4.0/go.mod h1:nb+dGViZleRP4XcyHuZSVrJCBl55nRBOMmiSL/dyziw= +github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= +github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= +github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= +github.com/libp2p/go-tcp-transport v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= +github.com/libp2p/go-tcp-transport v0.5.1/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= +github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport 
v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= +github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= +github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= +github.com/libp2p/go-ws-transport v0.6.0/go.mod h1:dXqtI9e2JV9FtF1NOtWVZSKXh5zXvnuwPXfj8GPBbYU= +github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= +github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= +github.com/libp2p/go-yamux/v2 v2.1.1/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/go-yamux/v3 v3.0.1/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= +github.com/libp2p/go-yamux/v3 v3.0.2/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= 
+github.com/libp2p/go-yamux/v3 v3.1.1/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= +github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= +github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg= +github.com/lucas-clemente/quic-go v0.27.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI= +github.com/lucas-clemente/quic-go v0.27.1/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI= +github.com/lucas-clemente/quic-go v0.28.0 h1:9eXVRgIkMQQyiyorz/dAaOYIx3TFzXsIFkNFz4cxuJM= +github.com/lucas-clemente/quic-go v0.28.0/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= +github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.1/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI= +github.com/marten-seemann/qtls-go1-18 v0.1.1/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 h1:7m/WlWcSROrcK5NxuXaxYD32BZqe/LEEnBrWcH/cOqQ= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/tcp 
v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= 
+github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1/go.mod 
h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= +github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= +github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= +github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= 
+github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug= +github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= +github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod 
h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= +github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15/go.mod 
h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= +github.com/multiformats/go-multihash v0.2.0 h1:oytJb9ZA1OUW0r0f9ea18GiaPOo4SXyc7p2movyUuo4= +github.com/multiformats/go-multihash v0.2.0/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= +github.com/multiformats/go-multistream v0.3.0/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.3.1/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= +github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod 
h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod 
h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 
h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= 
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= 
+github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/statsd_exporter 
v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= 
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= 
+github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4= +github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod 
h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.9.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.10.0 h1:E86YlUMYfwIacEsQGlnTvjk1IgYkyTGjPhF0RnwTCmw= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= 
+github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799 h1:DOOT2B85S0tHoLGTzV+FakaSSihgRCVwZkjqKQP5L/w= +github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= 
+github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1/go.mod h1:tKH72zYNt/exx6/5IQO6L9LoQ0rEjd5SbbWaDTs9Zso= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xorcare/golden v0.6.0 h1:E8emU8bhyMIEpYmgekkTUaw4vtcrRE+Wa0c5wYIcgXc= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/ybbus/jsonrpc/v2 v2.1.6/go.mod h1:rIuG1+ORoiqocf9xs/v+ecaAVeo3zcZHQgInyKFMeg0= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= 
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.12.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= +go.uber.org/fx v1.15.0/go.mod h1:jI3RazQUhGv5KkpZIRv+kuP4CcgX3fnc0qX8bLnzbx8= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= 
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +golang.org/x/build 
v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto 
v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= +golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= +golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E= +golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= 
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220418201149-a630d4f3e7a2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220630215102-69896b714898 h1:K7wO6V1IrczY9QOQ2WkVpw4JQSwCd52UsxVEirZUfiw= +golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff 
v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/package-lock.json b/extern/sxx-go-fil-markets@v1.24.0-v17/package-lock.json new file mode 100644 index 00000000000..3c3468bd3ca --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/package-lock.json @@ -0,0 +1,838 @@ +{ + "name": "go-fil-markets", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "version": "1.0.0", + "license": "(Apache-2.0 OR MIT)", + "dependencies": { + "@mermaid-js/mermaid-cli": "^8.5.1-2" + } + }, + "node_modules/@mermaid-js/mermaid-cli": { + "version": "8.5.1-2", + "resolved": "https://registry.npmjs.org/@mermaid-js/mermaid-cli/-/mermaid-cli-8.5.1-2.tgz", + "integrity": "sha512-IGYWJZLlV7kx0NOnREvu2Ikioyg0AX59dbXoY13t1zdjUlrr5YQRgDVaIySkOo347yLB6z6thTmuxaYAw/KTHg==", + "dependencies": { + "chalk": "^3.0.0", + "commander": "^4.0.1", + "puppeteer": "^2.0.0" + }, + "bin": { + "mmdc": "index.bundle.js" + } + }, + "node_modules/@types/color-name": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", + "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" + }, + "node_modules/@types/mime-types": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/mime-types/-/mime-types-2.1.0.tgz", + "integrity": "sha1-nKUs2jY/aZxpRmwqbM2q2RPqenM=" + }, + "node_modules/agent-base": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-5.1.1.tgz", + "integrity": "sha512-TMeqbNl2fMW0nMjTEPOwe3J/PRFP4vqeoNuQMG0HlMrtm5QxKqdvAkZ1pRBQ/ulIyDD5Yq0nJ7YbdD8ey0TO3g==", + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/ansi-styles": { + "version": "4.2.1", + "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", + "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "dependencies": { + "@types/color-name": "^1.1.1", + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/async-limiter": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", + "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" + }, + "node_modules/balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", + "engines": { + "node": "*" + } + }, + "node_modules/buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" + }, + "node_modules/chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + 
"node": ">=8" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + }, + "node_modules/concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "engines": [ + "node >= 0.8" + ], + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + }, + "node_modules/debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "deprecated": "Debug versions >=3.2.0 <3.2.7 || >=4 <4.3.1 
have a low-severity ReDos regression when used in a Node.js environment. It is recommended you upgrade to 3.2.7 or 4.3.1. (https://github.com/visionmedia/debug/issues/797)", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/extract-zip": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.7.0.tgz", + "integrity": "sha512-xoh5G1W/PB0/27lXgMQyIhP5DSY/LhoCsOyZgb+6iMmRtCwVBo55uKaMoEYrDCKQhWvqEip5ZPKAc6eFNyf/MA==", + "dependencies": { + "concat-stream": "^1.6.2", + "debug": "^2.6.9", + "mkdirp": "^0.5.4", + "yauzl": "^2.10.0" + }, + "bin": { + "extract-zip": "cli.js" + } + }, + "node_modules/extract-zip/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/extract-zip/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "node_modules/fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4=", + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, + "node_modules/glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": 
"https://github.com/sponsors/isaacs" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/https-proxy-agent": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-4.0.0.tgz", + "integrity": "sha512-zoDhWrkR3of1l9QAL8/scJZyLu8j/gBkcwcaQOZh7Gyh/+uJQzGVETdgT30akuwkpL8HTRfssqI3BZuV18teDg==", + "dependencies": { + "agent-base": "5", + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "node_modules/mime": { + "version": "2.4.6", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.4.6.tgz", + "integrity": "sha512-RZKhC3EmpBchfTGBVb8fb+RL2cWyw/32lshnsETttkBAyAUXSGHxbEJWWRXc751DrIxG1q04b8QwMbAwkRPpUA==", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/mime-db": { + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", + "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": 
"2.1.27", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", + "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", + "dependencies": { + "mime-db": "1.44.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + }, + "node_modules/mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "dependencies": { + "minimist": "^1.2.5" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + 
"integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=" + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/puppeteer": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-2.1.1.tgz", + "integrity": "sha512-LWzaDVQkk1EPiuYeTOj+CZRIjda4k2s5w4MK4xoH2+kgWV/SDlkYHmxatDdtYrciHUKSXTsGgPgPP8ILVdBsxg==", + "hasInstallScript": true, + "dependencies": { + "@types/mime-types": "^2.1.0", + "debug": "^4.1.0", + "extract-zip": "^1.6.6", + "https-proxy-agent": "^4.0.0", + "mime": "^2.0.3", + "mime-types": "^2.1.25", + "progress": "^2.0.1", + "proxy-from-env": "^1.0.0", + "rimraf": "^2.6.1", + "ws": "^6.1.0" + }, + "engines": { + "node": ">=8.16.0" + } + }, + "node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/rimraf": { + "version": 
"2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/supports-color": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", + "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + }, + "node_modules/ws": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz", + "integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==", + "dependencies": { + 
"async-limiter": "~1.0.0" + } + }, + "node_modules/yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk=", + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + } + }, + "dependencies": { + "@mermaid-js/mermaid-cli": { + "version": "8.5.1-2", + "resolved": "https://registry.npmjs.org/@mermaid-js/mermaid-cli/-/mermaid-cli-8.5.1-2.tgz", + "integrity": "sha512-IGYWJZLlV7kx0NOnREvu2Ikioyg0AX59dbXoY13t1zdjUlrr5YQRgDVaIySkOo347yLB6z6thTmuxaYAw/KTHg==", + "requires": { + "chalk": "^3.0.0", + "commander": "^4.0.1", + "puppeteer": "^2.0.0" + } + }, + "@types/color-name": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", + "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" + }, + "@types/mime-types": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/mime-types/-/mime-types-2.1.0.tgz", + "integrity": "sha1-nKUs2jY/aZxpRmwqbM2q2RPqenM=" + }, + "agent-base": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-5.1.1.tgz", + "integrity": "sha512-TMeqbNl2fMW0nMjTEPOwe3J/PRFP4vqeoNuQMG0HlMrtm5QxKqdvAkZ1pRBQ/ulIyDD5Yq0nJ7YbdD8ey0TO3g==" + }, + "ansi-styles": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", + "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "requires": { + "@types/color-name": "^1.1.1", + "color-convert": "^2.0.1" + } + }, + "async-limiter": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz", + "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==" + }, + "balanced-match": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=" + }, + "buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" + }, + "chalk": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", + "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==" + }, + "concat-map": { + "version": "0.0.1", + "resolved": 
"https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + }, + "concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + }, + "debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "requires": { + "ms": "^2.1.1" + } + }, + "extract-zip": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.7.0.tgz", + "integrity": "sha512-xoh5G1W/PB0/27lXgMQyIhP5DSY/LhoCsOyZgb+6iMmRtCwVBo55uKaMoEYrDCKQhWvqEip5ZPKAc6eFNyf/MA==", + "requires": { + "concat-stream": "^1.6.2", + "debug": "^2.6.9", + "mkdirp": "^0.5.4", + "yauzl": "^2.10.0" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + } + } + }, + "fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4=", + "requires": { + "pend": "~1.2.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, + "glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" + }, + "https-proxy-agent": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-4.0.0.tgz", + "integrity": "sha512-zoDhWrkR3of1l9QAL8/scJZyLu8j/gBkcwcaQOZh7Gyh/+uJQzGVETdgT30akuwkpL8HTRfssqI3BZuV18teDg==", + "requires": { + "agent-base": "5", + "debug": "4" + } + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "mime": { + "version": "2.4.6", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.4.6.tgz", + "integrity": "sha512-RZKhC3EmpBchfTGBVb8fb+RL2cWyw/32lshnsETttkBAyAUXSGHxbEJWWRXc751DrIxG1q04b8QwMbAwkRPpUA==" + }, + "mime-db": { + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", 
+ "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==" + }, + "mime-types": { + "version": "2.1.27", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", + "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", + "requires": { + "mime-db": "1.44.0" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + }, + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + }, + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "requires": { + "wrappy": "1" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" + }, + "pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=" + }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==" + }, + "proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "puppeteer": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-2.1.1.tgz", + "integrity": "sha512-LWzaDVQkk1EPiuYeTOj+CZRIjda4k2s5w4MK4xoH2+kgWV/SDlkYHmxatDdtYrciHUKSXTsGgPgPP8ILVdBsxg==", + "requires": { + "@types/mime-types": "^2.1.0", + "debug": "^4.1.0", + "extract-zip": "^1.6.6", + "https-proxy-agent": "^4.0.0", + "mime": "^2.0.3", + "mime-types": "^2.1.25", + "progress": "^2.0.1", + "proxy-from-env": "^1.0.0", + "rimraf": "^2.6.1", + "ws": "^6.1.0" + } + }, + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "requires": { + "glob": "^7.1.3" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "supports-color": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", + "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", + "requires": { + "has-flag": "^4.0.0" + } + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + }, + "ws": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz", + "integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==", + "requires": { + "async-limiter": "~1.0.0" + } + }, + "yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk=", + "requires": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + } + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/package.json b/extern/sxx-go-fil-markets@v1.24.0-v17/package.json new file mode 100644 index 00000000000..f0cc638336b --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/package.json @@ -0,0 +1,25 @@ +{ + "name": "go-fil-markets", + "version": "1.0.0", + "description": "[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) [![CircleCI](https://circleci.com/gh/filecoin-project/go-fil-markets.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-fil-markets) [![codecov](https://codecov.io/gh/filecoin-project/go-fil-markets/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-fil-markets)", + "main": "index.js", + "directories": { + "doc": "docs" + }, + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/filecoin-project/go-fil-markets.git" + }, + "author": "", + "license": "(Apache-2.0 OR MIT)", + "bugs": { + "url": "https://github.com/filecoin-project/go-fil-markets/issues" + }, + "homepage": "https://github.com/filecoin-project/go-fil-markets#readme", + "dependencies": { + "@mermaid-js/mermaid-cli": "^8.5.1-2" + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/README.md b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/README.md new file mode 100644 index 00000000000..21c490a857c --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/README.md @@ -0,0 +1,48 @@ +# piecestore + +The `piecestore` module is a simple encapsulation of two data stores, one for `PieceInfo` and + another for `CIDInfo`. The piecestore's main goal is to help + [storagemarket module](../storagemarket) and [retrievalmarket module](../retrievalmarket) + find where sealed data lives inside of sectors. Storage market writes the + data, and retrieval market reads it. + +Both markets use `CIDInfo` to look up a Piece that contains the payload, and then + use `PieceInfo` to find the sector that contains the piece. 
+ +The storage market has to write this data before it completes the deal in order to later + look up the payload when the data is served. + +## Installation +```bash +go get github.com/filecoin-project/go-fil-markets/piecestore +``` + +### PieceStore +`PieceStore` is primary export of this module. It is a database +of piece info that can be modified and queried. The PieceStore +interface is implemented in [piecestore.go](./piecestore.go). + +It has two stores, one for `PieceInfo` keyed by `pieceCID`, and another for +`CIDInfo`, keyed by `payloadCID`. These keys are of type `cid.CID`; see +[github.com/ipfs/go-cid](https://github.com/ipfs/go-cid). + +**To initialize a PieceStore** +```go +func NewPieceStore(ds datastore.Batching) PieceStore +``` + +**Parameters** +* `ds datastore.Batching` is a datastore for the deal's state machine. It is + typically the node's own datastore that implements the IPFS datastore.Batching interface. + See + [github.com/ipfs/go-datastore](https://github.com/ipfs/go-datastore). + + +`PieceStore` implements the following functions: + +* [`AddDealForPiece`](./piecestore.go) +* [`AddPieceBlockLocations`](./piecestore.go) +* [`GetPieceInfo`](./piecestore.go) +* [`GetCIDInfo`](./piecestore.go) + +Please the [tests](piecestore_test.go) for more information about expected behavior. 
\ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/impl/piecestore.go b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/impl/piecestore.go new file mode 100644 index 00000000000..f82cf84a7f6 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/impl/piecestore.go @@ -0,0 +1,214 @@ +package piecestoreimpl + +import ( + "context" + + "github.com/hannahhoward/go-pubsub" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versioned "github.com/filecoin-project/go-ds-versioning/pkg/statestore" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/piecestore/migrations" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared" +) + +var log = logging.Logger("piecestore") + +// DSPiecePrefix is the name space for storing piece infos +var DSPiecePrefix = "/pieces" + +// DSCIDPrefix is the name space for storing CID infos +var DSCIDPrefix = "/cid-infos" + +// NewPieceStore returns a new piecestore based on the given datastore +func NewPieceStore(ds datastore.Batching) (piecestore.PieceStore, error) { + pieceInfoMigrations, err := migrations.PieceInfoMigrations.Build() + if err != nil { + return nil, err + } + pieces, migratePieces := versioned.NewVersionedStateStore(namespace.Wrap(ds, datastore.NewKey(DSPiecePrefix)), pieceInfoMigrations, versioning.VersionKey("1")) + cidInfoMigrations, err := migrations.CIDInfoMigrations.Build() + if err != nil { + return nil, err + } + cidInfos, migrateCidInfos := versioned.NewVersionedStateStore(namespace.Wrap(ds, datastore.NewKey(DSCIDPrefix)), cidInfoMigrations, versioning.VersionKey("1")) + return &pieceStore{ + readySub: pubsub.New(shared.ReadyDispatcher), + pieces: pieces, + migratePieces: 
migratePieces, + cidInfos: cidInfos, + migrateCidInfos: migrateCidInfos, + }, nil +} + +type pieceStore struct { + readySub *pubsub.PubSub + migratePieces func(ctx context.Context) error + pieces versioned.StateStore + migrateCidInfos func(ctx context.Context) error + cidInfos versioned.StateStore +} + +func (ps *pieceStore) Start(ctx context.Context) error { + go func() { + var err error + defer func() { + err = ps.readySub.Publish(err) + if err != nil { + log.Warnf("Publish piecestore migration ready event: %s", err.Error()) + } + }() + err = ps.migratePieces(ctx) + if err != nil { + log.Errorf("Migrating pieceInfos: %s", err.Error()) + return + } + err = ps.migrateCidInfos(ctx) + if err != nil { + log.Errorf("Migrating cidInfos: %s", err.Error()) + } + }() + return nil +} + +func (ps *pieceStore) OnReady(ready shared.ReadyFunc) { + ps.readySub.Subscribe(ready) +} + +// Store `dealInfo` in the PieceStore with key `pieceCID`. +func (ps *pieceStore) AddDealForPiece(pieceCID cid.Cid, dealInfo piecestore.DealInfo) error { + return ps.mutatePieceInfo(pieceCID, func(pi *piecestore.PieceInfo) error { + for _, di := range pi.Deals { + if di == dealInfo { + return nil + } + } + pi.Deals = append(pi.Deals, dealInfo) + return nil + }) +} + +// Store the map of blockLocations in the PieceStore's CIDInfo store, with key `pieceCID` +func (ps *pieceStore) AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]piecestore.BlockLocation) error { + for c, blockLocation := range blockLocations { + err := ps.mutateCIDInfo(c, func(ci *piecestore.CIDInfo) error { + for _, pbl := range ci.PieceBlockLocations { + if pbl.PieceCID.Equals(pieceCID) && pbl.BlockLocation == blockLocation { + return nil + } + } + ci.PieceBlockLocations = append(ci.PieceBlockLocations, piecestore.PieceBlockLocation{BlockLocation: blockLocation, PieceCID: pieceCID}) + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func (ps *pieceStore) ListPieceInfoKeys() ([]cid.Cid, 
error) { + var pis []piecestore.PieceInfo + if err := ps.pieces.List(&pis); err != nil { + return nil, err + } + + out := make([]cid.Cid, 0, len(pis)) + for _, pi := range pis { + out = append(out, pi.PieceCID) + } + + return out, nil +} + +func (ps *pieceStore) ListCidInfoKeys() ([]cid.Cid, error) { + var cis []piecestore.CIDInfo + if err := ps.cidInfos.List(&cis); err != nil { + return nil, err + } + + out := make([]cid.Cid, 0, len(cis)) + for _, ci := range cis { + out = append(out, ci.CID) + } + + return out, nil +} + +// Retrieve the PieceInfo associated with `pieceCID` from the piece info store. +func (ps *pieceStore) GetPieceInfo(pieceCID cid.Cid) (piecestore.PieceInfo, error) { + var out piecestore.PieceInfo + if err := ps.pieces.Get(pieceCID).Get(&out); err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return piecestore.PieceInfo{}, xerrors.Errorf("piece with CID %s: %w", pieceCID, retrievalmarket.ErrNotFound) + } + return piecestore.PieceInfo{}, err + } + return out, nil +} + +// Retrieve the CIDInfo associated with `pieceCID` from the CID info store. 
+func (ps *pieceStore) GetCIDInfo(payloadCID cid.Cid) (piecestore.CIDInfo, error) { + var out piecestore.CIDInfo + if err := ps.cidInfos.Get(payloadCID).Get(&out); err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return piecestore.CIDInfo{}, xerrors.Errorf("payload CID %s: %w", payloadCID, retrievalmarket.ErrNotFound) + } + return piecestore.CIDInfo{}, err + } + return out, nil +} + +func (ps *pieceStore) ensurePieceInfo(pieceCID cid.Cid) error { + has, err := ps.pieces.Has(pieceCID) + + if err != nil { + return err + } + if has { + return nil + } + + pieceInfo := piecestore.PieceInfo{PieceCID: pieceCID} + return ps.pieces.Begin(pieceCID, &pieceInfo) +} + +func (ps *pieceStore) ensureCIDInfo(c cid.Cid) error { + has, err := ps.cidInfos.Has(c) + + if err != nil { + return err + } + + if has { + return nil + } + + cidInfo := piecestore.CIDInfo{CID: c} + return ps.cidInfos.Begin(c, &cidInfo) +} + +func (ps *pieceStore) mutatePieceInfo(pieceCID cid.Cid, mutator interface{}) error { + err := ps.ensurePieceInfo(pieceCID) + if err != nil { + return err + } + + return ps.pieces.Get(pieceCID).Mutate(mutator) +} + +func (ps *pieceStore) mutateCIDInfo(c cid.Cid, mutator interface{}) error { + err := ps.ensureCIDInfo(c) + if err != nil { + return err + } + + return ps.cidInfos.Get(c).Mutate(mutator) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/impl/piecestore_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/impl/piecestore_test.go new file mode 100644 index 00000000000..d7c5eb981a4 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/impl/piecestore_test.go @@ -0,0 +1,315 @@ +package piecestoreimpl_test + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-statestore" + + "github.com/filecoin-project/go-fil-markets/piecestore" + piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl" + "github.com/filecoin-project/go-fil-markets/piecestore/migrations" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestStorePieceInfo(t *testing.T) { + ctx := context.Background() + pieceCid := shared_testutil.GenerateCids(1)[0] + pieceCid2 := shared_testutil.GenerateCids(1)[0] + initializePieceStore := func(t *testing.T, ctx context.Context) piecestore.PieceStore { + ps, err := piecestoreimpl.NewPieceStore(datastore.NewMapDatastore()) + require.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, ps) + _, err = ps.GetPieceInfo(pieceCid) + assert.Error(t, err) + return ps + } + + // Add a deal info + t.Run("can add deals", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ps := initializePieceStore(t, ctx) + dealInfo := piecestore.DealInfo{ + DealID: abi.DealID(rand.Uint64()), + SectorID: abi.SectorNumber(rand.Uint64()), + Offset: abi.PaddedPieceSize(rand.Uint64()), + Length: abi.PaddedPieceSize(rand.Uint64()), + } + err := ps.AddDealForPiece(pieceCid, dealInfo) + assert.NoError(t, err) + + pi, err := ps.GetPieceInfo(pieceCid) + assert.NoError(t, err) + assert.Len(t, pi.Deals, 1) + assert.Equal(t, pi.Deals[0], dealInfo) + + // Verify that getting a piece with a non-existent CID returns ErrNotFound + pi, err = ps.GetPieceInfo(pieceCid2) + assert.Error(t, err) + assert.True(t, xerrors.Is(err, retrievalmarket.ErrNotFound)) + }) + + t.Run("adding same deal twice does not dup", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ps := initializePieceStore(t, ctx) + dealInfo := piecestore.DealInfo{ + DealID: abi.DealID(rand.Uint64()), + SectorID: abi.SectorNumber(rand.Uint64()), + Offset: 
abi.PaddedPieceSize(rand.Uint64()), + Length: abi.PaddedPieceSize(rand.Uint64()), + } + err := ps.AddDealForPiece(pieceCid, dealInfo) + assert.NoError(t, err) + + pi, err := ps.GetPieceInfo(pieceCid) + assert.NoError(t, err) + assert.Len(t, pi.Deals, 1) + assert.Equal(t, pi.Deals[0], dealInfo) + + err = ps.AddDealForPiece(pieceCid, dealInfo) + assert.NoError(t, err) + + pi, err = ps.GetPieceInfo(pieceCid) + assert.NoError(t, err) + assert.Len(t, pi.Deals, 1) + assert.Equal(t, pi.Deals[0], dealInfo) + }) +} + +func TestStoreCIDInfo(t *testing.T) { + ctx := context.Background() + pieceCids := shared_testutil.GenerateCids(2) + pieceCid1 := pieceCids[0] + pieceCid2 := pieceCids[1] + testCIDs := shared_testutil.GenerateCids(4) + blockLocations := make([]piecestore.BlockLocation, 0, 3) + for i := 0; i < 3; i++ { + blockLocations = append(blockLocations, piecestore.BlockLocation{ + RelOffset: rand.Uint64(), + BlockSize: rand.Uint64(), + }) + } + + initializePieceStore := func(t *testing.T, ctx context.Context) piecestore.PieceStore { + ps, err := piecestoreimpl.NewPieceStore(datastore.NewMapDatastore()) + assert.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, ps) + _, err = ps.GetCIDInfo(testCIDs[0]) + assert.Error(t, err) + return ps + } + + t.Run("can add piece block locations", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ps := initializePieceStore(t, ctx) + err := ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ + testCIDs[0]: blockLocations[0], + testCIDs[1]: blockLocations[1], + testCIDs[2]: blockLocations[2], + }) + assert.NoError(t, err) + + ci, err := ps.GetCIDInfo(testCIDs[0]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[0], PieceCID: pieceCid1}) + + ci, err = ps.GetCIDInfo(testCIDs[1]) + assert.NoError(t, err) + assert.Len(t, 
ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[1], PieceCID: pieceCid1}) + + ci, err = ps.GetCIDInfo(testCIDs[2]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[2], PieceCID: pieceCid1}) + + // Verify that getting CID info with a non-existent CID returns ErrNotFound + ci, err = ps.GetCIDInfo(testCIDs[3]) + assert.Error(t, err) + assert.True(t, xerrors.Is(err, retrievalmarket.ErrNotFound)) + }) + + t.Run("overlapping adds", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ps := initializePieceStore(t, ctx) + err := ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ + testCIDs[0]: blockLocations[0], + testCIDs[1]: blockLocations[2], + }) + assert.NoError(t, err) + err = ps.AddPieceBlockLocations(pieceCid2, map[cid.Cid]piecestore.BlockLocation{ + testCIDs[1]: blockLocations[1], + testCIDs[2]: blockLocations[2], + }) + assert.NoError(t, err) + + ci, err := ps.GetCIDInfo(testCIDs[0]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[0], PieceCID: pieceCid1}) + + ci, err = ps.GetCIDInfo(testCIDs[1]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 2) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[2], PieceCID: pieceCid1}) + assert.Equal(t, ci.PieceBlockLocations[1], piecestore.PieceBlockLocation{BlockLocation: blockLocations[1], PieceCID: pieceCid2}) + + ci, err = ps.GetCIDInfo(testCIDs[2]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[2], PieceCID: pieceCid2}) + }) + + 
t.Run("duplicate adds", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + ps := initializePieceStore(t, ctx) + err := ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ + testCIDs[0]: blockLocations[0], + testCIDs[1]: blockLocations[1], + }) + assert.NoError(t, err) + err = ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ + testCIDs[1]: blockLocations[1], + testCIDs[2]: blockLocations[2], + }) + assert.NoError(t, err) + + ci, err := ps.GetCIDInfo(testCIDs[0]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[0], PieceCID: pieceCid1}) + + ci, err = ps.GetCIDInfo(testCIDs[1]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[1], PieceCID: pieceCid1}) + + ci, err = ps.GetCIDInfo(testCIDs[2]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[2], PieceCID: pieceCid1}) + }) +} + +func TestMigrations(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + pieceCids := shared_testutil.GenerateCids(1) + pieceCid1 := pieceCids[0] + testCIDs := shared_testutil.GenerateCids(3) + blockLocations := make([]piecestore.BlockLocation, 0, 3) + for i := 0; i < 3; i++ { + blockLocations = append(blockLocations, piecestore.BlockLocation{ + RelOffset: rand.Uint64(), + BlockSize: rand.Uint64(), + }) + } + dealInfo := piecestore.DealInfo{ + DealID: abi.DealID(rand.Uint64()), + SectorID: abi.SectorNumber(rand.Uint64()), + Offset: abi.PaddedPieceSize(rand.Uint64()), + Length: abi.PaddedPieceSize(rand.Uint64()), + } + + ds := datastore.NewMapDatastore() + + oldCidInfos 
:= statestore.New(namespace.Wrap(ds, datastore.NewKey(piecestoreimpl.DSCIDPrefix))) + err := oldCidInfos.Begin(testCIDs[0], &migrations.CIDInfo0{ + CID: testCIDs[0], + PieceBlockLocations: []migrations.PieceBlockLocation0{ + { + BlockLocation0: migrations.BlockLocation0{ + RelOffset: blockLocations[0].RelOffset, + BlockSize: blockLocations[0].BlockSize, + }, + PieceCID: pieceCid1, + }, + }, + }) + require.NoError(t, err) + err = oldCidInfos.Begin(testCIDs[1], &migrations.CIDInfo0{ + CID: testCIDs[1], + PieceBlockLocations: []migrations.PieceBlockLocation0{ + { + BlockLocation0: migrations.BlockLocation0{ + RelOffset: blockLocations[1].RelOffset, + BlockSize: blockLocations[1].BlockSize, + }, + PieceCID: pieceCid1, + }, + }, + }) + require.NoError(t, err) + err = oldCidInfos.Begin(testCIDs[2], &migrations.CIDInfo0{ + CID: testCIDs[2], + PieceBlockLocations: []migrations.PieceBlockLocation0{ + { + BlockLocation0: migrations.BlockLocation0{ + RelOffset: blockLocations[2].RelOffset, + BlockSize: blockLocations[2].BlockSize, + }, + PieceCID: pieceCid1, + }, + }, + }) + require.NoError(t, err) + oldPieces := statestore.New(namespace.Wrap(ds, datastore.NewKey(piecestoreimpl.DSPiecePrefix))) + err = oldPieces.Begin(pieceCid1, &migrations.PieceInfo0{ + PieceCID: pieceCid1, + Deals: []migrations.DealInfo0{ + { + DealID: dealInfo.DealID, + SectorID: dealInfo.SectorID, + Offset: dealInfo.Offset, + Length: dealInfo.Length, + }, + }, + }) + require.NoError(t, err) + + ps, err := piecestoreimpl.NewPieceStore(ds) + require.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, ps) + + t.Run("migrates deals", func(t *testing.T) { + pi, err := ps.GetPieceInfo(pieceCid1) + assert.NoError(t, err) + assert.Len(t, pi.Deals, 1) + assert.Equal(t, pi.Deals[0], dealInfo) + }) + + t.Run("migrates piece block locations", func(t *testing.T) { + ci, err := ps.GetCIDInfo(testCIDs[0]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, 
ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[0], PieceCID: pieceCid1}) + + ci, err = ps.GetCIDInfo(testCIDs[1]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[1], PieceCID: pieceCid1}) + + ci, err = ps.GetCIDInfo(testCIDs[2]) + assert.NoError(t, err) + assert.Len(t, ci.PieceBlockLocations, 1) + assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{BlockLocation: blockLocations[2], PieceCID: pieceCid1}) + }) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/migrations/migrations.go b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/migrations/migrations.go new file mode 100644 index 00000000000..bbaa6e66684 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/migrations/migrations.go @@ -0,0 +1,91 @@ +package migrations + +import ( + "github.com/ipfs/go-cid" + + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/piecestore" +) + +//go:generate cbor-gen-for PieceInfo0 DealInfo0 BlockLocation0 PieceBlockLocation0 CIDInfo0 + +// DealInfo0 is version 0 of DealInfo +type DealInfo0 struct { + DealID abi.DealID + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Length abi.PaddedPieceSize +} + +// BlockLocation0 is version 0 of BlockLocation +type BlockLocation0 struct { + RelOffset uint64 + BlockSize uint64 +} + +// PieceBlockLocation0 is version 0 of PieceBlockLocation +// is inside of +type PieceBlockLocation0 struct { + BlockLocation0 + PieceCID cid.Cid +} + +// CIDInfo0 is version 0 of CIDInfo +type CIDInfo0 struct { + CID cid.Cid + PieceBlockLocations []PieceBlockLocation0 +} + +// PieceInfo0 is version 0 of PieceInfo +type PieceInfo0 struct { + PieceCID cid.Cid + Deals 
[]DealInfo0 +} + +// MigratePieceInfo0To1 migrates a tuple encoded piece info to a map encoded piece info +func MigratePieceInfo0To1(oldPi *PieceInfo0) (*piecestore.PieceInfo, error) { + deals := make([]piecestore.DealInfo, len(oldPi.Deals)) + for i, oldDi := range oldPi.Deals { + deals[i] = piecestore.DealInfo{ + DealID: oldDi.DealID, + SectorID: oldDi.SectorID, + Offset: oldDi.Offset, + Length: oldDi.Length, + } + } + return &piecestore.PieceInfo{ + PieceCID: oldPi.PieceCID, + Deals: deals, + }, nil +} + +// MigrateCidInfo0To1 migrates a tuple encoded cid info to a map encoded cid info +func MigrateCidInfo0To1(oldCi *CIDInfo0) (*piecestore.CIDInfo, error) { + pieceBlockLocations := make([]piecestore.PieceBlockLocation, len(oldCi.PieceBlockLocations)) + for i, oldPbl := range oldCi.PieceBlockLocations { + pieceBlockLocations[i] = piecestore.PieceBlockLocation{ + BlockLocation: piecestore.BlockLocation{ + RelOffset: oldPbl.RelOffset, + BlockSize: oldPbl.BlockSize, + }, + PieceCID: oldPbl.PieceCID, + } + } + return &piecestore.CIDInfo{ + CID: oldCi.CID, + PieceBlockLocations: pieceBlockLocations, + }, nil +} + +// PieceInfoMigrations is the list of migrations for migrating PieceInfos +var PieceInfoMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigratePieceInfo0To1, versioning.VersionKey("1")), +} + +// CIDInfoMigrations is the list of migrations for migrating CIDInfos +var CIDInfoMigrations = versioned.BuilderList{ + versioned.NewVersionedBuilder(MigrateCidInfo0To1, versioning.VersionKey("1")), +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/migrations/migrations_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/migrations/migrations_cbor_gen.go new file mode 100644 index 00000000000..f7c63a490a0 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/migrations/migrations_cbor_gen.go @@ -0,0 +1,507 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufPieceInfo0 = []byte{130} + +func (t *PieceInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPieceInfo0); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + // t.Deals ([]migrations.DealInfo0) (slice) + if len(t.Deals) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Deals was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Deals))); err != nil { + return err + } + for _, v := range t.Deals { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *PieceInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PieceCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + // t.Deals ([]migrations.DealInfo0) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Deals: array too large (%d)", extra) + } + + if maj != 
cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Deals = make([]DealInfo0, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealInfo0 + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Deals[i] = v + } + + return nil +} + +var lengthBufDealInfo0 = []byte{132} + +func (t *DealInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealInfo0); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.SectorID (abi.SectorNumber) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { + return err + } + + // t.Offset (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.Length (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Length)); err != nil { + return err + } + + return nil +} + +func (t *DealInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.SectorID (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != 
cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorID = abi.SectorNumber(extra) + + } + // t.Offset (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.Length (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = abi.PaddedPieceSize(extra) + + } + return nil +} + +var lengthBufBlockLocation0 = []byte{130} + +func (t *BlockLocation0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufBlockLocation0); err != nil { + return err + } + + // t.RelOffset (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.RelOffset)); err != nil { + return err + } + + // t.BlockSize (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BlockSize)); err != nil { + return err + } + + return nil +} + +func (t *BlockLocation0) UnmarshalCBOR(r io.Reader) (err error) { + *t = BlockLocation0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.RelOffset (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RelOffset = uint64(extra) + + } + // t.BlockSize (uint64) (uint64) + + { + + maj, extra, 
err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BlockSize = uint64(extra) + + } + return nil +} + +var lengthBufPieceBlockLocation0 = []byte{130} + +func (t *PieceBlockLocation0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPieceBlockLocation0); err != nil { + return err + } + + // t.BlockLocation0 (migrations.BlockLocation0) (struct) + if err := t.BlockLocation0.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + return nil +} + +func (t *PieceBlockLocation0) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceBlockLocation0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.BlockLocation0 (migrations.BlockLocation0) (struct) + + { + + if err := t.BlockLocation0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.BlockLocation0: %w", err) + } + + } + // t.PieceCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + return nil +} + +var lengthBufCIDInfo0 = []byte{130} + +func (t *CIDInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufCIDInfo0); err != nil { + return err + } + + // t.CID (cid.Cid) (struct) + + 
if err := cbg.WriteCid(cw, t.CID); err != nil { + return xerrors.Errorf("failed to write cid field t.CID: %w", err) + } + + // t.PieceBlockLocations ([]migrations.PieceBlockLocation0) (slice) + if len(t.PieceBlockLocations) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.PieceBlockLocations was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.PieceBlockLocations))); err != nil { + return err + } + for _, v := range t.PieceBlockLocations { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *CIDInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = CIDInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.CID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CID: %w", err) + } + + t.CID = c + + } + // t.PieceBlockLocations ([]migrations.PieceBlockLocation0) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.PieceBlockLocations: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.PieceBlockLocations = make([]PieceBlockLocation0, extra) + } + + for i := 0; i < int(extra); i++ { + + var v PieceBlockLocation0 + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.PieceBlockLocations[i] = v + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/types.go new file mode 100644 index 00000000000..42fdd3426f1 --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/types.go @@ -0,0 +1,66 @@ +package piecestore + +import ( + "context" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +//go:generate cbor-gen-for --map-encoding PieceInfo DealInfo BlockLocation PieceBlockLocation CIDInfo + +// DealInfo is information about a single deal for a given piece +type DealInfo struct { + DealID abi.DealID + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Length abi.PaddedPieceSize +} + +// BlockLocation is information about where a given block is relative to the overall piece +type BlockLocation struct { + RelOffset uint64 + BlockSize uint64 +} + +// PieceBlockLocation is block information along with the pieceCID of the piece the block +// is inside of +type PieceBlockLocation struct { + BlockLocation + PieceCID cid.Cid +} + +// CIDInfo is information about where a given CID will live inside a piece +type CIDInfo struct { + CID cid.Cid + PieceBlockLocations []PieceBlockLocation +} + +// CIDInfoUndefined is cid info with no information +var CIDInfoUndefined = CIDInfo{} + +// PieceInfo is metadata about a piece a provider may be storing based +// on its PieceCID -- so that, given a pieceCID during retrieval, the miner +// can determine how to unseal it if needed +type PieceInfo struct { + PieceCID cid.Cid + Deals []DealInfo +} + +// PieceInfoUndefined is piece info with no information +var PieceInfoUndefined = PieceInfo{} + +// PieceStore is a saved database of piece info that can be modified and queried +type PieceStore interface { + Start(ctx context.Context) error + OnReady(ready shared.ReadyFunc) + AddDealForPiece(pieceCID cid.Cid, dealInfo DealInfo) error + AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]BlockLocation) error + GetPieceInfo(pieceCID cid.Cid) (PieceInfo, error) + GetCIDInfo(payloadCID cid.Cid) (CIDInfo, error) + ListCidInfoKeys() ([]cid.Cid, error) + 
ListPieceInfoKeys() ([]cid.Cid, error) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/types_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/types_cbor_gen.go new file mode 100644 index 00000000000..de1b75992b6 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/piecestore/types_cbor_gen.go @@ -0,0 +1,736 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package piecestore + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *PieceInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + // t.Deals ([]piecestore.DealInfo) (slice) + if len("Deals") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Deals\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Deals"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Deals")); err != nil { + return err + } + + if len(t.Deals) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Deals was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Deals))); err != nil { + return err + } + for 
_, v := range t.Deals { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *PieceInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + // t.Deals ([]piecestore.DealInfo) (slice) + case "Deals": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Deals: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Deals = make([]DealInfo, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealInfo + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Deals[i] = v + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.SectorID (abi.SectorNumber) (uint64) + if len("SectorID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SectorID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { + return err + } + + // t.Offset (abi.PaddedPieceSize) (uint64) + if len("Offset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Offset\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Offset"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Offset")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.Length (abi.PaddedPieceSize) (uint64) + if len("Length") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Length\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Length"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Length")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Length)); err != nil { + return err + } + + return nil +} + +func (t *DealInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input 
should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.SectorID (abi.SectorNumber) (uint64) + case "SectorID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorID = abi.SectorNumber(extra) + + } + // t.Offset (abi.PaddedPieceSize) (uint64) + case "Offset": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.Length (abi.PaddedPieceSize) (uint64) + case "Length": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = abi.PaddedPieceSize(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *BlockLocation) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.RelOffset (uint64) (uint64) + if len("RelOffset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RelOffset\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RelOffset"))); err != 
nil { + return err + } + if _, err := io.WriteString(w, string("RelOffset")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.RelOffset)); err != nil { + return err + } + + // t.BlockSize (uint64) (uint64) + if len("BlockSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BlockSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BlockSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BlockSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BlockSize)); err != nil { + return err + } + + return nil +} + +func (t *BlockLocation) UnmarshalCBOR(r io.Reader) (err error) { + *t = BlockLocation{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("BlockLocation: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.RelOffset (uint64) (uint64) + case "RelOffset": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RelOffset = uint64(extra) + + } + // t.BlockSize (uint64) (uint64) + case "BlockSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BlockSize = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} 
+func (t *PieceBlockLocation) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.BlockLocation (piecestore.BlockLocation) (struct) + if len("BlockLocation") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BlockLocation\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BlockLocation"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BlockLocation")); err != nil { + return err + } + + if err := t.BlockLocation.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + return nil +} + +func (t *PieceBlockLocation) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceBlockLocation{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceBlockLocation: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.BlockLocation (piecestore.BlockLocation) (struct) + case "BlockLocation": + + { + + if err := t.BlockLocation.UnmarshalCBOR(cr); 
err != nil { + return xerrors.Errorf("unmarshaling t.BlockLocation: %w", err) + } + + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *CIDInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.CID (cid.Cid) (struct) + if len("CID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.CID); err != nil { + return xerrors.Errorf("failed to write cid field t.CID: %w", err) + } + + // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + if len("PieceBlockLocations") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceBlockLocations\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceBlockLocations"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceBlockLocations")); err != nil { + return err + } + + if len(t.PieceBlockLocations) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.PieceBlockLocations was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.PieceBlockLocations))); err != nil { + return err + } + for _, v := range t.PieceBlockLocations { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *CIDInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = CIDInfo{} + + cr := 
cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("CIDInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.CID (cid.Cid) (struct) + case "CID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CID: %w", err) + } + + t.CID = c + + } + // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + case "PieceBlockLocations": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.PieceBlockLocations: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.PieceBlockLocations = make([]PieceBlockLocation, extra) + } + + for i := 0; i < int(extra); i++ { + + var v PieceBlockLocation + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.PieceBlockLocations[i] = v + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/README.md b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/README.md new file mode 100644 index 00000000000..bd7653a45b4 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/README.md @@ -0,0 +1,303 @@ +# retrievalmarket +The retrievalmarket module is intended for Filecoin node implementations written in Go. +It implements functionality to allow execution of retrieval market deals on the +Filecoin network. 
+The node implementation must provide access to chain operations, and persistent +data storage. + +## Table of Contents +* [Background reading](#Background-reading) +* [Installation](#Installation) +* [Operation](#Operation) +* [Implementation](#Implementation) + * [Peer Resolver](#Peer_Resolver) + * [RetrievalClientNode](#RetrievalClientNode) + * [RetrievalProviderNode](#RetrievalProviderNode) +* [Construction](#Construction) + * [Construct a RetrievalClient](#RetrievalClient) + * [Construct a RetrievalProvider](#RetrievalProvider) +* [Technical Documentation](#technical-documentation) + +## Background reading +Please see the +[Filecoin Retrieval Market Specification](https://filecoin-project.github.io/specs/#systems__filecoin_markets__retrieval_market). + +## Installation +The build process for retrievalmarket requires Go >= v1.13. + +To install: +```bash +go get github.com/filecoin-project/go-fil-markets/retrievalmarket +``` + +## Operation + +The `retrievalmarket` package provides high level APIs to execute data retrieval deals between a + retrieval client and a retrieval + provider (a.k.a. retrieval miner) on the Filecoin network. + The node must implement the `PeerResolver`, `RetrievalProviderNode`, and + `RetrievalClientNode` interfaces in order to construct and use the module. + +Deals are expected to survive a node restart; deals and related information are + expected to be stored on disk. + +`retrievalmarket` communicates its deal operations and requested data via +[go-data-transfer](https://github.com/filecoin-project/go-data-transfer) using +[go-graphsync](https://github.com/ipfs/go-graphsync). + +Once required Node APIs are implemented and the retrievalmarket APIs are exposed to your desired + consumers (such as a command-line or web interface), a retrieval from the client side could +proceed roughly like so: +1. Your node has a record of data with payloadCIDs and their respective pieceCIDs. 
Someone, +possibly you, wants to retrieve data referenced by `payloadCID`. +1. It calls `PeerResolver.GetPeers` to obtain a list of retrieval providers storing data + referenced by `payloadCID`. +1. It obtains retrieval deal terms by calling each retrieval miners' `Query` function. +1. The node selects the best terms for a retrieval deal and initiates a deal by calling + the retrieval client's `Retrieve` function with the selected retrieval miner and piece info. +1. The deal then proceeds automatically until all the data is returned and full payment in the + form of vouchers is made to the retrieval provider, or the deal errors. +1. Once the deal is complete and the final payment voucher is posted to chain, your client account balance + will be adjusted according to the terms of the deal. + +A retrieval from the provider side is more automated; the RetrievalProvider would be listening + for retrieval Query and Retrieve requests, and respond accordingly. + +1. Your node stores a record of what it has stored locally, or possibly a record of peers + with data. +1. Your node receives a Query for `payloadCID` and responds automatically with the terms you the + node operator have set for retrieval deals. +1. Your node receives a DealProposal for retrieval, and automatically validates and accepts or + rejects it. If accepted, the deal proceeds and your node begins sending data in pieces, stopping + every so often to request another voucher for a greater value. +1. Once the deal is complete and your node has received a voucher sufficient to cover the entire +data transfer, you the node operator may then redeem the voucher and collect FIL. + +### Collecting FIL for a deal is the node's responsibility +To collect your FIL, your node must send on-chain +messages directly to the payment channel actor to send all the vouchers, +Settle, and Collect on the deal. This will finalize the client and provider balances for the +retrieval deal on the Filecoin blockchain. 
Implementation and timing of these calls is the node's +responsibility and is not a part of `retrievalmarket`. For more information about how +to interact with the +payment channel actor, see the +[github.com/filecoin-project/specs-actors](https://github.com/filecoin-project/specs-actors) repo. + +## Implementation + +### General Steps +1. Decide if your node can be configured as a Retrieval Provider, a Retrieval Client or both. +1. Determine how and where your retrieval calls to RetrievalProvider and RetrievalClient functions + will be made. +1. Implement the required interfaces as described in this section. +1. [Construct a RetrievalClient](#RetrievalClient) in your node's startup, if your + node will be a client. +1. [Construct a RetrievalProvider](#RetrievalProvider) in your node's startup, if your + node will be a provider. +If setting up a RetrievalProvider, call its `Start` function it in the appropriate place, and its + `Stop` function in the appropriate place. +1. Expose desired `retrievalmarket` functionality to whatever internal modules desired, such as + command line interface, JSON RPC, or HTTP API. + +Implement the [`PeerResolver`](#PeerResolver), [`RetrievalProviderNode`](#RetrievalProviderNode), +and [`RetrievalClientNode`](#RetrievalClientNode) +interfaces in [retrievalmarket/types.go](./types.go), described below: + +### PeerResolver +PeerResolver is an interface for looking up providers that may have a piece of identifiable +data. Its functions are: + +#### GetPeers +```go +func GetPeers(payloadCID cid.Cid) ([]RetrievalPeer, error) +``` +Return a slice of RetrievalPeers that store the data referenced by `payloadCID`. + +--- +### RetrievalClientNode + +`RetrievalClientNode` contains the node dependencies for a RetrievalClient. 
Its functions are: + +* [`AllocateLane`](#AllocateLane) +* [`GetChainHead`](#GetChainHead) +* [`GetOrCreatePaymentChannel`](#GetOrCreatePaymentChannel) +* [`CreatePaymentVoucher`](#CreatePaymentVoucher) +* [`WaitForPaymentChannelAddFunds`](#WaitForPaymentChannelAddFunds) +* [`WaitForPaymentChannelCreation`](#WaitForPaymentChannelCreation) + +#### AllocateLane +```go +func AllocateLane(paymentChannel address.Address) (uint64, error) +``` + +Create a lane within `paymentChannel` so that calls to CreatePaymentVoucher will +automatically make vouchers only for the difference in total. Note that payment channel +Actors have a +[lane limit](https://github.com/filecoin-project/specs-actors/blob/0df536f7e461599c818231aa0effcdaccbb74900/actors/builtin/paych/paych_actor.go#L20). + +#### CreatePaymentVoucher +```go +func CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, + amount abi.TokenAmount, lane uint64, tok shared.TipSetToken + ) (*paych.SignedVoucher, error) +``` +Create a new payment voucher for `paymentChannel` with `amount`, for lane `lane`, given chain +state at `tok`. + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### GetOrCreatePaymentChannel +```go +func GetOrCreatePaymentChannel(ctx context.Context, clientAddress, minerAddress address.Address, + amount abi.TokenAmount, tok shared.TipSetToken + ) (address.Address, cid.Cid, error) +``` +If there is a current payment channel for deals between `clientAddress` and `minerAddress`, +add `amount` to the channel, then return the payment channel address and `cid.Undef`. + +If there isn't, construct a new payment channel actor with `amount` funds by posting +the corresponding message on chain, then return `address.Undef` and the posted message `cid.Cid`. 
+For more information about how to construct a payment channel actor, see +[github.com/filecoin-project/specs-actors](https://github.com/filecoin-project/specs-actors) + +#### WaitForPaymentChannelAddFunds +```go +func WaitForPaymentChannelAddFunds(messageCID cid.Cid) error +``` +Wait for message with CID `messageCID` on chain that funds have been sent to a payment channel. + +#### WaitForPaymentChannelCreation +```go +func WaitForPaymentChannelCreation(messageCID cid.Cid) (address.Address, error) +``` +Wait for a message on chain with CID `messageCID` that a payment channel has been created. + +--- +### RetrievalProviderNode +`RetrievalProviderNode` contains the node dependencies for a RetrievalProvider. +Its functions are: + +* [`GetChainHead`](#GetChainHead) +* [`GetMinerWorkerAddress`](#GetMinerWorkerAddress) +* [`UnsealSector`](#UnsealSector) +* [`SavePaymentVoucher`](#SavePaymentVoucher) + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### GetMinerWorkerAddress +```go +func GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken, + ) (address.Address, error) +``` +Get the miner worker address for the given miner owner, as of `tok`. + +#### UnsealSector +```go +func UnsealSector(ctx context.Context, sectorID uint64, offset uint64, length uint64, + ) (io.ReadCloser, error) +``` +Unseal `length` data contained in `sectorID`, starting at `offset`. Return an `io.ReadCloser +` for accessing the data. + +#### SavePaymentVoucher +```go +func SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, + voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, + tok shared.TipSetToken) (abi.TokenAmount, error) +``` + +Save the provided `paych.SignedVoucher` for `paymentChannel`. 
The RetrievalProviderNode +implementation should validate the SignedVoucher using the provided `proof`, ` +expectedAmount`, based on the chain state referenced by `tok`. The value of the +voucher should be equal or greater than the largest previous voucher by + `expectedAmount`. It returns the actual difference. + + +## Construction +### RetrievalClient +```go +package retrievalimpl +func NewClient( + netwk network.RetrievalMarketNetwork, + bs blockstore.Blockstore, + node retrievalmarket.RetrievalClientNode, + resolver retrievalmarket.PeerResolver, + ds datastore.Batching, + storedCounter *storedcounter.StoredCounter, +) (retrievalmarket.RetrievalClient, error) +``` +#### Parameters +* `netwk rmnet.RetrievalMarketNetwork` + `RetrievalMarketNetwork` is an interface for creating and handling deal streams. To create it: + + ```go + package network + + func NewFromLibp2pHost(h host.Host) RetrievalMarketNetwork + ``` + where `h host.Host` is your node's libp2p Host. + See + [github.com/libp2p/go-libp2p-core/host](https://github.com/libp2p/go-libp2p-core/host). + +* `bs blockstore.Blockstore` is an IPFS blockstore for storing and retrieving data for deals. + See + [github.com/ipfs/go-ipfs-blockstore](github.com/ipfs/go-ipfs-blockstore). + +* `node retrievalmarket.RetrievalClientNode` is the `RetrievalClientNode` interface you have + implemented. + +* `resolver retrievalmarket.PeerResolver` is the `PeerResolver` interface you have implemented. +* `ds datastore.Batching` is a datastore for the deal's state machine. It is + typically the node's own datastore that implements the IPFS datastore.Batching interface. + See + [github.com/ipfs/go-datastore](https://github.com/ipfs/go-datastore). + + * `storedCounter *storedcounter.StoredCounter` is a file-based stored counter used to generate new + dealIDs. See + [github.com/filecoin-project/go-storedcounter](https://github.com/filecoin-project/go-storedcounter). 
+ +### RetrievalProvider +```go +package retrievalimpl + +func NewProvider(minerAddress address.Address, + node retrievalmarket.RetrievalProviderNode, + netwk network.RetrievalMarketNetwork, + pieceStore piecestore.PieceStore, + bs blockstore.Blockstore, + ds datastore.Batching, + ) (retrievalmarket.RetrievalProvider, error) +``` + +#### Parameters +* `minerAddress address.Address` is the address of the retrieval miner owner. +* `node retrievalmarket.RetrievalProviderNode` is the `RetrievalProviderNode` API you have implemented. +* `netwk rmnet.RetrievalMarketNetwork` is the same interface for creating and handling deal streams +as for [constructing a RetrievalClient](#RetrievalClient). +* `pieceStore piecestore.PieceStore` is the database of deals and pieces associated with them. +See this repo's [piecestore module](../piecestore). +* `bs blockstore.Blockstore` is the same interface as for +[constructing a RetrievalClient](#RetrievalClient). +* `ds datastore.Batching` is the same batching datastore interface as for +[constructing a RetrievalClient](#RetrievalClient). 
+ +## Technical Documentation + +* [GoDoc](https://godoc.org/github.com/filecoin-project/go-fil-markets/retrievalmarket) contains an architectural overview and robust API documentation + +* Retrieval Client FSM diagram: + +[![Diagram of RetrievalClientFSM](../docs/retrievalclient.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg) + + +* Retrieval Provider FSM diagram: + +[![Diagram of RetrievalClientFSM](../docs/retrievalprovider.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg) diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/client.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/client.go new file mode 100644 index 00000000000..44288ef5cc4 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/client.go @@ -0,0 +1,86 @@ +package retrievalmarket + +import ( + "context" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +type PayloadCID = cid.Cid + +// BlockstoreAccessor is used by the retrieval market client to get a +// blockstore when needed, concretely to store blocks received from the provider. +// This abstraction allows the caller to provider any blockstore implementation: +// a CARv2 file, an IPFS blockstore, or something else. +type BlockstoreAccessor interface { + Get(DealID, PayloadCID) (bstore.Blockstore, error) + Done(DealID) error +} + +// ClientSubscriber is a callback that is registered to listen for retrieval events +type ClientSubscriber func(event ClientEvent, state ClientDealState) + +type RetrieveResponse struct { + DealID DealID + CarFilePath string +} + +// RetrievalClient is a client interface for making retrieval deals +type RetrievalClient interface { + + // NextID generates a new deal ID. 
+ NextID() DealID + + // Start initializes the client by running migrations + Start(ctx context.Context) error + + // OnReady registers a listener for when the client comes on line + OnReady(shared.ReadyFunc) + + // Find Providers finds retrieval providers who may be storing a given piece + FindProviders(payloadCID cid.Cid) []RetrievalPeer + + // Query asks a provider for information about a piece it is storing + Query( + ctx context.Context, + p RetrievalPeer, + payloadCID cid.Cid, + params QueryParams, + ) (QueryResponse, error) + + // Retrieve retrieves all or part of a piece with the given retrieval parameters + Retrieve( + ctx context.Context, + id DealID, + payloadCID cid.Cid, + params Params, + totalFunds abi.TokenAmount, + p RetrievalPeer, + clientWallet address.Address, + minerWallet address.Address, + ) (DealID, error) + + // SubscribeToEvents listens for events that happen related to client retrievals + SubscribeToEvents(subscriber ClientSubscriber) Unsubscribe + + // V1 + + // TryRestartInsufficientFunds attempts to restart any deals stuck in the insufficient funds state + // after funds are added to a given payment channel + TryRestartInsufficientFunds(paymentChannel address.Address) error + + // CancelDeal attempts to cancel an inprogress deal + CancelDeal(id DealID) error + + // GetDeal returns a given deal by deal ID, if it exists + GetDeal(dealID DealID) (ClientDealState, error) + + // ListDeals returns all deals + ListDeals() (map[DealID]ClientDealState, error) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/common.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/common.go new file mode 100644 index 00000000000..fc2c8e05c14 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/common.go @@ -0,0 +1,22 @@ +package retrievalmarket + +import ( + "bytes" + + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + cbg 
"github.com/whyrusleeping/cbor-gen" +) + +// DecodeNode validates and computes a decoded ipld.Node selector from the +// provided cbor-encoded selector +func DecodeNode(defnode *cbg.Deferred) (ipld.Node, error) { + reader := bytes.NewReader(defnode.Raw) + nb := basicnode.Prototype.Any.NewBuilder() + err := dagcbor.Decode(nb, reader) + if err != nil { + return nil, err + } + return nb.Build(), nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/dealstatus.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/dealstatus.go new file mode 100644 index 00000000000..a685c28b168 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/dealstatus.go @@ -0,0 +1,186 @@ +package retrievalmarket + +import "fmt" + +// DealStatus is the status of a retrieval deal returned by a provider +// in a DealResponse +type DealStatus uint64 + +const ( + // DealStatusNew is a deal that nothing has happened with yet + DealStatusNew DealStatus = iota + + // DealStatusUnsealing means the provider is unsealing data + DealStatusUnsealing + + // DealStatusUnsealed means the provider has finished unsealing data + DealStatusUnsealed + + // DealStatusWaitForAcceptance means we're waiting to hear back if the provider accepted our deal + DealStatusWaitForAcceptance + + // DealStatusPaymentChannelCreating is the status set while waiting for the + // payment channel creation to complete + DealStatusPaymentChannelCreating + + // DealStatusPaymentChannelAddingFunds is the status when we are waiting for funds + // to finish being sent to the payment channel + DealStatusPaymentChannelAddingFunds + + // DealStatusAccepted means a deal has been accepted by a provider + // and its is ready to proceed with retrieval + DealStatusAccepted + + // DealStatusFundsNeededUnseal means a deal has been accepted by a provider + // and payment is needed to unseal the data + DealStatusFundsNeededUnseal + + // DealStatusFailing indicates something went wrong during a retrieval, + 
// and we are cleaning up before terminating with an error + DealStatusFailing + + // DealStatusRejected indicates the provider rejected a client's deal proposal + // for some reason + DealStatusRejected + + // DealStatusFundsNeeded indicates the provider needs a payment voucher to + // continue processing the deal + DealStatusFundsNeeded + + // DealStatusSendFunds indicates the client is now going to send funds because we reached the threshold of the last payment + DealStatusSendFunds + + // DealStatusSendFundsLastPayment indicates the client is now going to send final funds because + // we reached the threshold of the final payment + DealStatusSendFundsLastPayment + + // DealStatusOngoing indicates the provider is continuing to process a deal + DealStatusOngoing + + // DealStatusFundsNeededLastPayment indicates the provider needs a payment voucher + // in order to complete a deal + DealStatusFundsNeededLastPayment + + // DealStatusCompleted indicates a deal is complete + DealStatusCompleted + + // DealStatusDealNotFound indicates an update was received for a deal that could + // not be identified + DealStatusDealNotFound + + // DealStatusErrored indicates a deal has terminated in an error + DealStatusErrored + + // DealStatusBlocksComplete indicates that all blocks have been processed for the piece + DealStatusBlocksComplete + + // DealStatusFinalizing means the last payment has been received and + // we are just confirming the deal is complete + DealStatusFinalizing + + // DealStatusCompleting is just an inbetween state to perform final cleanup of + // complete deals + DealStatusCompleting + + // DealStatusCheckComplete is used for when the provided completes without a last payment + // requested cycle, to verify we have received all blocks + DealStatusCheckComplete + + // DealStatusCheckFunds means we are looking at the state of funding for the channel to determine + // if more money is incoming + DealStatusCheckFunds + + // DealStatusInsufficientFunds 
indicates we have depleted funds for the retrieval payment channel + // - we can resume after funds are added + DealStatusInsufficientFunds + + // DealStatusPaymentChannelAllocatingLane is the status when we are making a lane for this channel + DealStatusPaymentChannelAllocatingLane + + // DealStatusCancelling means we are cancelling an inprogress deal + DealStatusCancelling + + // DealStatusCancelled means a deal has been cancelled + DealStatusCancelled + + // DealStatusRetryLegacy means we're attempting the deal proposal for a second time using the legacy datatype + DealStatusRetryLegacy + + // DealStatusWaitForAcceptanceLegacy means we're waiting to hear the results on the legacy protocol + DealStatusWaitForAcceptanceLegacy + + // DealStatusClientWaitingForLastBlocks means that the provider has told + // the client that all blocks were sent for the deal, and the client is + // waiting for the last blocks to arrive. This should only happen when + // the deal price per byte is zero (if it's not zero the provider asks + // for final payment after sending the last blocks). 
+ DealStatusClientWaitingForLastBlocks + + // DealStatusPaymentChannelAddingInitialFunds means that a payment channel + // exists from an earlier deal between client and provider, but we need + // to add funds to the channel for this particular deal + DealStatusPaymentChannelAddingInitialFunds + + // DealStatusErroring means that there was an error and we need to + // do some cleanup before moving to the error state + DealStatusErroring + + // DealStatusRejecting means that the deal was rejected and we need to do + // some cleanup before moving to the rejected state + DealStatusRejecting + + // DealStatusDealNotFoundCleanup means that the deal was not found and we + // need to do some cleanup before moving to the not found state + DealStatusDealNotFoundCleanup + + // DealStatusFinalizingBlockstore means that all blocks have been received, + // and the blockstore is being finalized + DealStatusFinalizingBlockstore +) + +// DealStatuses maps deal status to a human readable representation +var DealStatuses = map[DealStatus]string{ + DealStatusNew: "DealStatusNew", + DealStatusUnsealing: "DealStatusUnsealing", + DealStatusUnsealed: "DealStatusUnsealed", + DealStatusWaitForAcceptance: "DealStatusWaitForAcceptance", + DealStatusPaymentChannelCreating: "DealStatusPaymentChannelCreating", + DealStatusPaymentChannelAddingFunds: "DealStatusPaymentChannelAddingFunds", + DealStatusAccepted: "DealStatusAccepted", + DealStatusFundsNeededUnseal: "DealStatusFundsNeededUnseal", + DealStatusFailing: "DealStatusFailing", + DealStatusRejected: "DealStatusRejected", + DealStatusFundsNeeded: "DealStatusFundsNeeded", + DealStatusSendFunds: "DealStatusSendFunds", + DealStatusSendFundsLastPayment: "DealStatusSendFundsLastPayment", + DealStatusOngoing: "DealStatusOngoing", + DealStatusFundsNeededLastPayment: "DealStatusFundsNeededLastPayment", + DealStatusCompleted: "DealStatusCompleted", + DealStatusDealNotFound: "DealStatusDealNotFound", + DealStatusErrored: "DealStatusErrored", + 
DealStatusBlocksComplete: "DealStatusBlocksComplete", + DealStatusFinalizing: "DealStatusFinalizing", + DealStatusCompleting: "DealStatusCompleting", + DealStatusCheckComplete: "DealStatusCheckComplete", + DealStatusCheckFunds: "DealStatusCheckFunds", + DealStatusInsufficientFunds: "DealStatusInsufficientFunds", + DealStatusPaymentChannelAllocatingLane: "DealStatusPaymentChannelAllocatingLane", + DealStatusCancelling: "DealStatusCancelling", + DealStatusCancelled: "DealStatusCancelled", + DealStatusRetryLegacy: "DealStatusRetryLegacy", + DealStatusWaitForAcceptanceLegacy: "DealStatusWaitForAcceptanceLegacy", + DealStatusClientWaitingForLastBlocks: "DealStatusWaitingForLastBlocks", + DealStatusPaymentChannelAddingInitialFunds: "DealStatusPaymentChannelAddingInitialFunds", + DealStatusErroring: "DealStatusErroring", + DealStatusRejecting: "DealStatusRejecting", + DealStatusDealNotFoundCleanup: "DealStatusDealNotFoundCleanup", + DealStatusFinalizingBlockstore: "DealStatusFinalizingBlockstore", +} + +func (s DealStatus) String() string { + str, ok := DealStatuses[s] + if ok { + return str + } + return fmt.Sprintf("DealStatusUnknown - %d", s) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/defaults.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/defaults.go new file mode 100644 index 00000000000..59ed69203a1 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/defaults.go @@ -0,0 +1,18 @@ +package retrievalmarket + +import "github.com/filecoin-project/go-state-types/abi" + +// DefaultPricePerByte is the charge per byte retrieved if the miner does +// not specifically set it +var DefaultPricePerByte = abi.NewTokenAmount(2) + +// DefaultUnsealPrice is the default charge to unseal a sector for retrieval +var DefaultUnsealPrice = abi.NewTokenAmount(0) + +// DefaultPaymentInterval is the baseline interval, set to 1Mb +// if the miner does not explicitly set it otherwise +var DefaultPaymentInterval = uint64(1 << 20) + +// 
DefaultPaymentIntervalIncrease is the amount interval increases on each payment,
+// set to 1Mb if the miner does not explicitly set it otherwise
+var DefaultPaymentIntervalIncrease = uint64(1 << 20)
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/doc.go
new file mode 100644
index 00000000000..06a445ea52d
--- /dev/null
+++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/doc.go
@@ -0,0 +1,108 @@
+/*
+Package retrievalmarket implements the Filecoin retrieval protocol.
+
+An overview of the retrieval protocol can be found in the Filecoin specification:
+
+https://filecoin-project.github.io/specs/#systems__filecoin_markets__retrieval_market
+
+The following architectural components provide a brief overview of the design of
+the retrieval market module:
+
+Public Interfaces And Node Dependencies
+
+While retrieval deals primarily happen off-chain, there are some chain operations
+that must be performed by a Filecoin node implementation. The module is intended to separate
+the primarily off-chain retrieval deal flow from the on-chain operations related primarily
+to payment channels, the mechanism for getting paid for retrieval deals.
+
+As such for both the client and the provider in the retrieval market, the module defines a top level
+public interface which it provides an implementation for, and a node interface that must be implemented
+by the Filecoin node itself, and provided as a dependency. These node interfaces provide a universal way to
+talk to potentially multiple different Filecoin node implementations, and can be implemented as using HTTP
+or other interprocess communication to talk to a node implementation running in a different process.
+
+The top level interfaces this package implements are RetrievalClient & RetrievalProvider. The dependencies the Filecoin
+node is expected to implement are RetrievalClientNode & RetrievalProviderNode. 
Further documentation of exactly what those
+dependencies should do can be found in the readme.
+
+Finite State Machines
+
+While retrieval deals in general should be fairly fast, making a retrieval deal is still an asynchronous process.
+As documented in the Filecoin spec, the basic architecture of the Filecoin retrieval protocol is incremental payments.
+Because neither client nor provider trust each other, we bootstrap trust by essentially paying in small increments as we receive
+data. The client only sends payment when it verifies data and the provider only sends more data when it receives payment.
+Not surprisingly, many things can go wrong along the way. To manage this back and forth asynchronous process,
+we use finite state machines that update deal state when discrete events occur. State updates
+always persist state to disk. This means we have a permanent record of exactly what's going on with deals at any time,
+and we can ideally survive our Filecoin processes shutting down and restarting.
+
+The following diagrams visualize the statemachine flows for the client and the provider:
+
+Client FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg
+
+Provider FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg
+
+Identifying Retrieval Providers
+
+The RetrievalClient provides two functions to locate a provider from which to retrieve data.
+
+`FindProviders` returns a list of retrieval peers who may have the data you're looking for. FindProviders delegates its work to
+an implementation of the PeerResolver interface.
+
+`Query` queries a specific retrieval provider to find out definitively if they have the requested data and if so, the
+parameters they will accept for a retrieval deal.
+
+Deal Flow
+
+The primary mechanism for initiating storage deals is the `Retrieve` method on the RetrievalClient. 
+ +When `Retrieve` is called, it allocates a new DealID from its stored counter, constructs a DealProposal, sends +the deal proposal to the provider, initiates tracking of deal state and hands the deal to the Client FSM, +and returns the DealID which constitutes the identifier for that deal. + +The Retrieval provider receives the deal in `HandleDealStream`. `HandleDealStream` initiates tracking of deal state +on the Provider side and hands the deal to the Provider FSM, which handles the rest of deal flow. + +From this point forward, deal negotiation is completely asynchronous and runs in the FSMs. + +A user of the modules can monitor deal progress through `SubscribeToEvents` methods on RetrievalClient and RetrievalProvider, +or by simply calling `ListDeals` to get all deal statuses. + +The FSMs implement every remaining step in deal negotiation. Importantly, the RetrievalProvider delegates unsealing sectors +back to the node via the `UnsealSector` method (the node itself likely delegates management of sectors and sealing to an +implementation of the Storage Mining subsystem of the Filecoin spec). Sectors are unsealed on an as needed basis using +the `PieceStore` to locate sectors that contain data related to the deal. 
+
+Major Dependencies
+
+Other libraries in go-fil-markets:
+
+https://github.com/filecoin-project/go-fil-markets/tree/master/piecestore - used to locate data for deals in sectors
+https://github.com/filecoin-project/go-fil-markets/tree/master/shared - types and utility functions shared with
+storagemarket package
+
+Other Filecoin Repos:
+
+https://github.com/filecoin-project/go-data-transfer - for transferring data, via go-graphsync
+https://github.com/filecoin-project/go-statemachine - a finite state machine that tracks deal state
+https://github.com/filecoin-project/go-storedcounter - for generating and persisting unique deal IDs
+https://github.com/filecoin-project/specs-actors - the Filecoin actors
+
+IPFS Project Repos:
+
+https://github.com/ipfs/go-graphsync - used by go-data-transfer
+https://github.com/ipfs/go-datastore - for persisting statemachine state for deals
+https://github.com/ipfs/go-ipfs-blockstore - for storing and retrieving block data for deals
+
+Other Repos:
+
+https://github.com/libp2p/go-libp2p - the network over which retrieval deal data is exchanged.
+https://github.com/hannahhoward/go-pubsub - for pub/sub notifications external to the statemachine
+
+Root package
+
+This top level package defines top level enumerations and interfaces. 
The primary implementation +lives in the `impl` directory + +*/ +package retrievalmarket diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/events.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/events.go new file mode 100644 index 00000000000..7c61c098291 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/events.go @@ -0,0 +1,281 @@ +package retrievalmarket + +import "fmt" + +// ClientEvent is an event that occurs in a deal lifecycle on the client +type ClientEvent uint64 + +const ( + // ClientEventOpen indicates a deal was initiated + ClientEventOpen ClientEvent = iota + + // ClientEventWriteDealProposalErrored means a network error writing a deal proposal + ClientEventWriteDealProposalErrored + + // ClientEventDealProposed means a deal was successfully sent to a miner + ClientEventDealProposed + + // ClientEventDealRejected means a deal was rejected by the provider + ClientEventDealRejected + + // ClientEventDealNotFound means a provider could not find a piece for a deal + ClientEventDealNotFound + + // ClientEventDealAccepted means a provider accepted a deal + ClientEventDealAccepted + + // ClientEventProviderCancelled means a provider has sent a message to cancel a deal + ClientEventProviderCancelled + + // ClientEventUnknownResponseReceived means a client received a response it doesn't + // understand from the provider + ClientEventUnknownResponseReceived + + // ClientEventPaymentChannelErrored means there was a failure creating a payment channel + ClientEventPaymentChannelErrored + + // ClientEventAllocateLaneErrored means there was a failure creating a lane in a payment channel + ClientEventAllocateLaneErrored + + // ClientEventPaymentChannelCreateInitiated means we are waiting for a message to + // create a payment channel to appear on chain + ClientEventPaymentChannelCreateInitiated + + // ClientEventPaymentChannelReady means the newly created payment channel is ready for the + // deal to resume + 
ClientEventPaymentChannelReady + + // ClientEventPaymentChannelAddingFunds mean we are waiting for funds to be + // added to a payment channel + ClientEventPaymentChannelAddingFunds + + // ClientEventPaymentChannelAddFundsErrored means that adding funds to the payment channel + // failed + ClientEventPaymentChannelAddFundsErrored + + // ClientEventLastPaymentRequested indicates the provider requested a final payment + ClientEventLastPaymentRequested + + // ClientEventAllBlocksReceived indicates the provider has sent all blocks + ClientEventAllBlocksReceived + + // ClientEventPaymentRequested indicates the provider requested a payment + ClientEventPaymentRequested + + // ClientEventUnsealPaymentRequested indicates the provider requested a payment for unsealing the sector + ClientEventUnsealPaymentRequested + + // ClientEventBlocksReceived indicates the provider has sent blocks + ClientEventBlocksReceived + + // ClientEventSendFunds emits when we reach the threshold to send the next payment + ClientEventSendFunds + + // ClientEventFundsExpended indicates a deal has run out of funds in the payment channel + // forcing the client to add more funds to continue the deal + ClientEventFundsExpended // when totalFunds is expended + + // ClientEventBadPaymentRequested indicates the provider asked for funds + // in a way that does not match the terms of the deal + ClientEventBadPaymentRequested + + // ClientEventCreateVoucherFailed indicates an error happened creating a payment voucher + ClientEventCreateVoucherFailed + + // ClientEventWriteDealPaymentErrored indicates a network error trying to write a payment + ClientEventWriteDealPaymentErrored + + // ClientEventPaymentSent indicates a payment was sent to the provider + ClientEventPaymentSent + + // ClientEventComplete is fired when the provider sends a message + // indicating that a deal has completed + ClientEventComplete + + // ClientEventDataTransferError emits when something go wrong at the data transfer level + 
ClientEventDataTransferError + + // ClientEventCancelComplete happens when a deal cancellation is transmitted to the provider + ClientEventCancelComplete + + // ClientEventEarlyTermination indications a provider send a deal complete without sending all data + ClientEventEarlyTermination + + // ClientEventCompleteVerified means that a provider completed without requesting a final payment but + // we verified we received all data + ClientEventCompleteVerified + + // ClientEventLaneAllocated is called when a lane is allocated + ClientEventLaneAllocated + + // ClientEventVoucherShortfall means we tried to create a voucher but did not have enough funds in channel + // to create it + ClientEventVoucherShortfall + + // ClientEventRecheckFunds runs when an external caller indicates there may be new funds in a payment channel + ClientEventRecheckFunds + + // ClientEventCancel runs when a user cancels a deal + ClientEventCancel + + // ClientEventWaitForLastBlocks is fired when the provider has told + // the client that all blocks were sent for the deal, and the client is + // waiting for the last blocks to arrive + ClientEventWaitForLastBlocks + + // ClientEventPaymentChannelSkip is fired when the total deal price is zero + // so there's no need to set up a payment channel + ClientEventPaymentChannelSkip + + // ClientEventPaymentNotSent indicates that payment was requested, but no + // payment was actually due, so a voucher was not sent to the provider + ClientEventPaymentNotSent + + // ClientEventBlockstoreFinalized is fired when the blockstore has been + // finalized after receiving all blocks + ClientEventBlockstoreFinalized + + // ClientEventFinalizeBlockstoreErrored is fired when there is an error + // finalizing the blockstore + ClientEventFinalizeBlockstoreErrored +) + +// ClientEvents is a human readable map of client event name -> event description +var ClientEvents = map[ClientEvent]string{ + ClientEventOpen: "ClientEventOpen", + ClientEventPaymentChannelErrored: 
"ClientEventPaymentChannelErrored", + ClientEventDealProposed: "ClientEventDealProposed", + ClientEventAllocateLaneErrored: "ClientEventAllocateLaneErrored", + ClientEventPaymentChannelCreateInitiated: "ClientEventPaymentChannelCreateInitiated", + ClientEventPaymentChannelReady: "ClientEventPaymentChannelReady", + ClientEventPaymentChannelAddingFunds: "ClientEventPaymentChannelAddingFunds", + ClientEventPaymentChannelAddFundsErrored: "ClientEventPaymentChannelAddFundsErrored", + ClientEventWriteDealProposalErrored: "ClientEventWriteDealProposalErrored", + ClientEventDealRejected: "ClientEventDealRejected", + ClientEventDealNotFound: "ClientEventDealNotFound", + ClientEventDealAccepted: "ClientEventDealAccepted", + ClientEventProviderCancelled: "ClientEventProviderCancelled", + ClientEventUnknownResponseReceived: "ClientEventUnknownResponseReceived", + ClientEventLastPaymentRequested: "ClientEventLastPaymentRequested", + ClientEventAllBlocksReceived: "ClientEventAllBlocksReceived", + ClientEventPaymentRequested: "ClientEventPaymentRequested", + ClientEventUnsealPaymentRequested: "ClientEventUnsealPaymentRequested", + ClientEventBlocksReceived: "ClientEventBlocksReceived", + ClientEventSendFunds: "ClientEventSendFunds", + ClientEventFundsExpended: "ClientEventFundsExpended", + ClientEventBadPaymentRequested: "ClientEventBadPaymentRequested", + ClientEventCreateVoucherFailed: "ClientEventCreateVoucherFailed", + ClientEventWriteDealPaymentErrored: "ClientEventWriteDealPaymentErrored", + ClientEventPaymentSent: "ClientEventPaymentSent", + ClientEventDataTransferError: "ClientEventDataTransferError", + ClientEventComplete: "ClientEventComplete", + ClientEventCancelComplete: "ClientEventCancelComplete", + ClientEventEarlyTermination: "ClientEventEarlyTermination", + ClientEventCompleteVerified: "ClientEventCompleteVerified", + ClientEventLaneAllocated: "ClientEventLaneAllocated", + ClientEventVoucherShortfall: "ClientEventVoucherShortfall", + ClientEventRecheckFunds: 
"ClientEventRecheckFunds", + ClientEventCancel: "ClientEventCancel", + ClientEventWaitForLastBlocks: "ClientEventWaitForLastBlocks", + ClientEventPaymentChannelSkip: "ClientEventPaymentChannelSkip", + ClientEventPaymentNotSent: "ClientEventPaymentNotSent", + ClientEventBlockstoreFinalized: "ClientEventBlockstoreFinalized", + ClientEventFinalizeBlockstoreErrored: "ClientEventFinalizeBlockstoreErrored", +} + +func (e ClientEvent) String() string { + s, ok := ClientEvents[e] + if ok { + return s + } + return fmt.Sprintf("ClientEventUnknown: %d", e) +} + +// ProviderEvent is an event that occurs in a deal lifecycle on the provider +type ProviderEvent uint64 + +const ( + // ProviderEventOpen indicates a new deal was received from a client + ProviderEventOpen ProviderEvent = iota + + // ProviderEventDealNotFound happens when the provider cannot find the piece for the + // deal proposed by the client + ProviderEventDealNotFound + + // ProviderEventDealRejected happens when a provider rejects a deal proposed + // by the client + ProviderEventDealRejected + + // ProviderEventDealAccepted happens when a provider accepts a deal + ProviderEventDealAccepted + + // ProviderEventBlockSent happens when the provider reads another block + // in the piece + ProviderEventBlockSent + + // ProviderEventBlocksCompleted happens when the provider reads the last block + // in the piece + ProviderEventBlocksCompleted + + // ProviderEventPaymentRequested happens when a provider asks for payment from + // a client for blocks sent + ProviderEventPaymentRequested + + // ProviderEventSaveVoucherFailed happens when an attempt to save a payment + // voucher fails + ProviderEventSaveVoucherFailed + + // ProviderEventPartialPaymentReceived happens when a provider receives and processes + // a payment that is less than what was requested to proceed with the deal + ProviderEventPartialPaymentReceived + + // ProviderEventPaymentReceived happens when a provider receives a payment + // and resumes 
processing a deal + ProviderEventPaymentReceived + + // ProviderEventComplete indicates a retrieval deal was completed for a client + ProviderEventComplete + + // ProviderEventUnsealError emits when something wrong occurs while unsealing data + ProviderEventUnsealError + + // ProviderEventUnsealComplete emits when the unsealing process is done + ProviderEventUnsealComplete + + // ProviderEventDataTransferError emits when something go wrong at the data transfer level + ProviderEventDataTransferError + + // ProviderEventCancelComplete happens when a deal cancellation is transmitted to the provider + ProviderEventCancelComplete + + // ProviderEventCleanupComplete happens when a deal is finished cleaning up and enters a complete state + ProviderEventCleanupComplete + + // ProviderEventMultiStoreError occurs when an error happens attempting to operate on the multistore + ProviderEventMultiStoreError + + // ProviderEventClientCancelled happens when the provider gets a cancel message from the client's data transfer + ProviderEventClientCancelled +) + +// ProviderEvents is a human readable map of provider event name -> event description +var ProviderEvents = map[ProviderEvent]string{ + ProviderEventOpen: "ProviderEventOpen", + ProviderEventDealNotFound: "ProviderEventDealNotFound", + ProviderEventDealRejected: "ProviderEventDealRejected", + ProviderEventDealAccepted: "ProviderEventDealAccepted", + ProviderEventBlockSent: "ProviderEventBlockSent", + ProviderEventBlocksCompleted: "ProviderEventBlocksCompleted", + ProviderEventPaymentRequested: "ProviderEventPaymentRequested", + ProviderEventSaveVoucherFailed: "ProviderEventSaveVoucherFailed", + ProviderEventPartialPaymentReceived: "ProviderEventPartialPaymentReceived", + ProviderEventPaymentReceived: "ProviderEventPaymentReceived", + ProviderEventComplete: "ProviderEventComplete", + ProviderEventUnsealError: "ProviderEventUnsealError", + ProviderEventUnsealComplete: "ProviderEventUnsealComplete", + 
ProviderEventDataTransferError: "ProviderEventDataTransferError", + ProviderEventCancelComplete: "ProviderEventCancelComplete", + ProviderEventCleanupComplete: "ProviderEventCleanupComplete", + ProviderEventMultiStoreError: "ProviderEventMultiStoreError", + ProviderEventClientCancelled: "ProviderEventClientCancelled", +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/askstore/askstore_impl.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/askstore/askstore_impl.go new file mode 100644 index 00000000000..0ccbfe6af6b --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/askstore/askstore_impl.go @@ -0,0 +1,129 @@ +package askstore + +import ( + "bytes" + "context" + "sync" + + "github.com/ipfs/go-datastore" + "golang.org/x/xerrors" + + cborutil "github.com/filecoin-project/go-cbor-util" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versionedds "github.com/filecoin-project/go-ds-versioning/pkg/datastore" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" +) + +// AskStoreImpl implements AskStore, persisting a retrieval Ask +// to disk. It also maintains a cache of the current Ask in memory +type AskStoreImpl struct { + lk sync.RWMutex + ask *retrievalmarket.Ask + ds datastore.Batching + key datastore.Key +} + +// NewAskStore returns a new instance of AskStoreImpl +// It will initialize a new default ask and store it if one is not set. 
+// Otherwise it loads the current Ask from disk +func NewAskStore(ds datastore.Batching, key datastore.Key) (*AskStoreImpl, error) { + askMigrations, err := migrations.AskMigrations.Build() + if err != nil { + return nil, err + } + versionedDs, migrateDs := versionedds.NewVersionedDatastore(ds, askMigrations, versioning.VersionKey("1")) + err = migrateDs(context.TODO()) + if err != nil { + return nil, err + } + s := &AskStoreImpl{ + ds: versionedDs, + key: key, + } + + if err := s.tryLoadAsk(); err != nil { + return nil, err + } + + if s.ask == nil { + // for now set a default retrieval ask + defaultAsk := &retrievalmarket.Ask{ + PricePerByte: retrievalmarket.DefaultPricePerByte, + UnsealPrice: retrievalmarket.DefaultUnsealPrice, + PaymentInterval: retrievalmarket.DefaultPaymentInterval, + PaymentIntervalIncrease: retrievalmarket.DefaultPaymentIntervalIncrease, + } + + if err := s.SetAsk(defaultAsk); err != nil { + return nil, xerrors.Errorf("failed setting a default retrieval ask: %w", err) + } + } + return s, nil +} + +// SetAsk stores retrieval provider's ask +func (s *AskStoreImpl) SetAsk(ask *retrievalmarket.Ask) error { + s.lk.Lock() + defer s.lk.Unlock() + + return s.saveAsk(ask) +} + +// GetAsk returns the current retrieval ask, or nil if one does not exist. 
+func (s *AskStoreImpl) GetAsk() *retrievalmarket.Ask { + s.lk.RLock() + defer s.lk.RUnlock() + if s.ask == nil { + return nil + } + ask := *s.ask + return &ask +} + +func (s *AskStoreImpl) tryLoadAsk() error { + s.lk.Lock() + defer s.lk.Unlock() + + err := s.loadAsk() + + if err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + // this is expected + return nil + } + return err + } + + return nil +} + +func (s *AskStoreImpl) loadAsk() error { + askb, err := s.ds.Get(context.TODO(), s.key) + if err != nil { + return xerrors.Errorf("failed to load most recent retrieval ask from disk: %w", err) + } + + var ask retrievalmarket.Ask + if err := cborutil.ReadCborRPC(bytes.NewReader(askb), &ask); err != nil { + return err + } + + s.ask = &ask + return nil +} + +func (s *AskStoreImpl) saveAsk(a *retrievalmarket.Ask) error { + b, err := cborutil.Dump(a) + if err != nil { + return err + } + + if err := s.ds.Put(context.TODO(), s.key, b); err != nil { + return err + } + + s.ask = a + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/askstore/askstore_impl_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/askstore/askstore_impl_test.go new file mode 100644 index 00000000000..9db56526455 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/askstore/askstore_impl_test.go @@ -0,0 +1,76 @@ +package askstore_test + +import ( + "bytes" + "context" + "math/rand" + "testing" + + "github.com/ipfs/go-datastore" + dss "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/askstore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" +) + +func TestAskStoreImpl(t *testing.T) { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + store, err := askstore.NewAskStore(ds, 
datastore.NewKey("retrieval-ask")) + require.NoError(t, err) + + // A new store returns the default ask + ask := store.GetAsk() + require.NotNil(t, ask) + + require.Equal(t, retrievalmarket.DefaultUnsealPrice, ask.UnsealPrice) + require.Equal(t, retrievalmarket.DefaultPricePerByte, ask.PricePerByte) + require.Equal(t, retrievalmarket.DefaultPaymentInterval, ask.PaymentInterval) + require.Equal(t, retrievalmarket.DefaultPaymentIntervalIncrease, ask.PaymentIntervalIncrease) + + // Store a new ask + newAsk := &retrievalmarket.Ask{ + PricePerByte: abi.NewTokenAmount(123), + UnsealPrice: abi.NewTokenAmount(456), + PaymentInterval: 789, + PaymentIntervalIncrease: 789, + } + err = store.SetAsk(newAsk) + require.NoError(t, err) + + // Fetch new ask + stored := store.GetAsk() + require.Equal(t, newAsk, stored) + + // Construct a new AskStore and make sure it returns the previously-stored ask + newStore, err := askstore.NewAskStore(ds, datastore.NewKey("retrieval-ask")) + require.NoError(t, err) + stored = newStore.GetAsk() + require.Equal(t, newAsk, stored) +} +func TestMigrations(t *testing.T) { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + oldAsk := &migrations.Ask0{ + PricePerByte: abi.NewTokenAmount(rand.Int63()), + UnsealPrice: abi.NewTokenAmount(rand.Int63()), + PaymentInterval: rand.Uint64(), + PaymentIntervalIncrease: rand.Uint64(), + } + buf := new(bytes.Buffer) + err := oldAsk.MarshalCBOR(buf) + require.NoError(t, err) + ds.Put(context.TODO(), datastore.NewKey("retrieval-ask"), buf.Bytes()) + newStore, err := askstore.NewAskStore(ds, datastore.NewKey("retrieval-ask")) + require.NoError(t, err) + ask := newStore.GetAsk() + expectedAsk := &retrievalmarket.Ask{ + PricePerByte: oldAsk.PricePerByte, + UnsealPrice: oldAsk.UnsealPrice, + PaymentInterval: oldAsk.PaymentInterval, + PaymentIntervalIncrease: oldAsk.PaymentIntervalIncrease, + } + require.Equal(t, expectedAsk, ask) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/client.go 
b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/client.go new file mode 100644 index 00000000000..06c6ef96101 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/client.go @@ -0,0 +1,501 @@ +package retrievalimpl + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/hannahhoward/go-pubsub" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + bstore "github.com/ipfs/go-ipfs-blockstore" + logging "github.com/ipfs/go-log/v2" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + versionedfsm "github.com/filecoin-project/go-ds-versioning/pkg/fsm" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/discovery" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/dtutils" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + "github.com/filecoin-project/go-fil-markets/shared" +) + +var log = logging.Logger("retrieval") + +// Client is the production implementation of the RetrievalClient interface +type Client struct { + network rmnet.RetrievalMarketNetwork + dataTransfer datatransfer.Manager + node retrievalmarket.RetrievalClientNode + dealIDGen *shared.TimeCounter + + subscribers *pubsub.PubSub + readySub *pubsub.PubSub + resolver discovery.PeerResolver + stateMachines fsm.Group + migrateStateMachines func(context.Context) error + bstores retrievalmarket.BlockstoreAccessor + + // Guards concurrent access to Retrieve 
method + retrieveLk sync.Mutex +} + +type internalEvent struct { + evt retrievalmarket.ClientEvent + state retrievalmarket.ClientDealState +} + +func dispatcher(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error { + ie, ok := evt.(internalEvent) + if !ok { + return errors.New("wrong type of event") + } + cb, ok := subscriberFn.(retrievalmarket.ClientSubscriber) + if !ok { + return errors.New("wrong type of event") + } + log.Debugw("process retrieval client listeners", "name", retrievalmarket.ClientEvents[ie.evt], "proposal cid", ie.state.ID) + cb(ie.evt, ie.state) + return nil +} + +var _ retrievalmarket.RetrievalClient = &Client{} + +// NewClient creates a new retrieval client +func NewClient( + network rmnet.RetrievalMarketNetwork, + dataTransfer datatransfer.Manager, + node retrievalmarket.RetrievalClientNode, + resolver discovery.PeerResolver, + ds datastore.Batching, + ba retrievalmarket.BlockstoreAccessor, +) (retrievalmarket.RetrievalClient, error) { + c := &Client{ + network: network, + dataTransfer: dataTransfer, + node: node, + resolver: resolver, + dealIDGen: shared.NewTimeCounter(), + subscribers: pubsub.New(dispatcher), + readySub: pubsub.New(shared.ReadyDispatcher), + bstores: ba, + } + retrievalMigrations, err := migrations.ClientMigrations.Build() + if err != nil { + return nil, err + } + c.stateMachines, c.migrateStateMachines, err = versionedfsm.NewVersionedFSM(ds, fsm.Parameters{ + Environment: &clientDealEnvironment{c}, + StateType: retrievalmarket.ClientDealState{}, + StateKeyField: "Status", + Events: clientstates.ClientEvents, + StateEntryFuncs: clientstates.ClientStateEntryFuncs, + FinalityStates: clientstates.ClientFinalityStates, + Notifier: c.notifySubscribers, + }, retrievalMigrations, "2") + if err != nil { + return nil, err + } + err = dataTransfer.RegisterVoucherResultType(&retrievalmarket.DealResponse{}) + if err != nil { + return nil, err + } + err = dataTransfer.RegisterVoucherResultType(&migrations.DealResponse0{}) + if err 
!= nil { + return nil, err + } + err = dataTransfer.RegisterVoucherType(&retrievalmarket.DealProposal{}, nil) + if err != nil { + return nil, err + } + err = dataTransfer.RegisterVoucherType(&migrations.DealProposal0{}, nil) + if err != nil { + return nil, err + } + err = dataTransfer.RegisterVoucherType(&retrievalmarket.DealPayment{}, nil) + if err != nil { + return nil, err + } + err = dataTransfer.RegisterVoucherType(&migrations.DealPayment0{}, nil) + if err != nil { + return nil, err + } + dataTransfer.SubscribeToEvents(dtutils.ClientDataTransferSubscriber(c.stateMachines)) + transportConfigurer := dtutils.TransportConfigurer(network.ID(), &clientStoreGetter{c}) + err = dataTransfer.RegisterTransportConfigurer(&retrievalmarket.DealProposal{}, transportConfigurer) + if err != nil { + return nil, err + } + err = dataTransfer.RegisterTransportConfigurer(&migrations.DealProposal0{}, transportConfigurer) + if err != nil { + return nil, err + } + return c, nil +} + +func (c *Client) NextID() retrievalmarket.DealID { + return retrievalmarket.DealID(c.dealIDGen.Next()) +} + +// Start initialized the Client, performing relevant database migrations +func (c *Client) Start(ctx context.Context) error { + go func() { + err := c.migrateStateMachines(ctx) + if err != nil { + log.Errorf("Migrating retrieval client state machines: %s", err.Error()) + } + + err = c.readySub.Publish(err) + if err != nil { + log.Warnf("Publish retrieval client ready event: %s", err.Error()) + } + }() + return nil +} + +// OnReady registers a listener for when the client has finished starting up +func (c *Client) OnReady(ready shared.ReadyFunc) { + c.readySub.Subscribe(ready) +} + +// FindProviders uses PeerResolver interface to locate a list of providers who may have a given payload CID. 
+func (c *Client) FindProviders(payloadCID cid.Cid) []retrievalmarket.RetrievalPeer { + peers, err := c.resolver.GetPeers(payloadCID) + if err != nil { + log.Errorf("failed to get peers: %s", err) + return []retrievalmarket.RetrievalPeer{} + } + return peers +} + +/* +Query sends a retrieval query to a specific retrieval provider, to determine +if the provider can serve a retrieval request and what its specific parameters for +the request are. + +The client creates a new `RetrievalQueryStream` for the chosen peer ID, +and calls `WriteQuery` on it, which constructs a data-transfer message and writes it to the Query stream. +*/ +func (c *Client) Query(ctx context.Context, p retrievalmarket.RetrievalPeer, payloadCID cid.Cid, params retrievalmarket.QueryParams) (retrievalmarket.QueryResponse, error) { + err := c.addMultiaddrs(ctx, p) + if err != nil { + log.Warn(err) + return retrievalmarket.QueryResponseUndefined, err + } + s, err := c.network.NewQueryStream(p.ID) + if err != nil { + log.Warn(err) + return retrievalmarket.QueryResponseUndefined, err + } + defer s.Close() + + err = s.WriteQuery(retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: params, + }) + if err != nil { + log.Warn(err) + return retrievalmarket.QueryResponseUndefined, err + } + + return s.ReadQueryResponse() +} + +// Retrieve initiates the retrieval deal flow, which involves multiple requests and responses +// +// To start this processes, the client creates a new `RetrievalDealStream`. Currently, this connection is +// kept open through the entire deal until completion or failure. Make deals pauseable as well as surviving +// a restart is a planned future feature. +// +// Retrieve should be called after using FindProviders and Query are used to identify an appropriate provider to +// retrieve the deal from. 
The parameters identified in Query should be passed to Retrieve to ensure the +// greatest likelihood the provider will accept the deal +// +// When called, the client takes the following actions: +// +// 1. Creates a deal ID using the next value from its `storedCounter`. +// +// 2. Constructs a `DealProposal` with deal terms +// +// 3. Tells its statemachine to begin tracking this deal state by dealID. +// +// 4. Constructs a `blockio.SelectorVerifier` and adds it to its dealID-keyed map of block verifiers. +// +// 5. Triggers a `ClientEventOpen` event on its statemachine. +// +// From then on, the statemachine controls the deal flow in the client. Other components may listen for events in this flow by calling +// `SubscribeToEvents` on the Client. The Client handles consuming blocks it receives from the provider, via `ConsumeBlocks` function +// +// Retrieve can use an ID generated through NextID, or can generate an ID if the user passes a zero value. +// +// Use NextID when it's necessary to reserve an ID ahead of time, e.g. to +// associate it with a given blockstore in the BlockstoreAccessor. +// +// Documentation of the client state machine can be found at https://godoc.org/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates +func (c *Client) Retrieve( + ctx context.Context, + id retrievalmarket.DealID, + payloadCID cid.Cid, + params retrievalmarket.Params, + totalFunds abi.TokenAmount, + p retrievalmarket.RetrievalPeer, + clientWallet address.Address, + minerWallet address.Address, +) (retrievalmarket.DealID, error) { + c.retrieveLk.Lock() + defer c.retrieveLk.Unlock() + + // Check if there's already an active retrieval deal with the same peer + // for the same payload CID + err := c.checkForActiveDeal(payloadCID, p.ID) + if err != nil { + return 0, err + } + + err = c.addMultiaddrs(ctx, p) + if err != nil { + return 0, err + } + + // assign a new ID. 
+ if id == 0 { + next := c.dealIDGen.Next() + id = retrievalmarket.DealID(next) + } + + dealState := retrievalmarket.ClientDealState{ + DealProposal: retrievalmarket.DealProposal{ + PayloadCID: payloadCID, + ID: id, + Params: params, + }, + TotalFunds: totalFunds, + ClientWallet: clientWallet, + MinerWallet: minerWallet, + TotalReceived: 0, + CurrentInterval: params.PaymentInterval, + BytesPaidFor: 0, + PaymentRequested: abi.NewTokenAmount(0), + FundsSpent: abi.NewTokenAmount(0), + Status: retrievalmarket.DealStatusNew, + Sender: p.ID, + UnsealFundsPaid: big.Zero(), + } + + // start the deal processing + err = c.stateMachines.Begin(dealState.ID, &dealState) + if err != nil { + return 0, err + } + + err = c.stateMachines.Send(dealState.ID, retrievalmarket.ClientEventOpen) + if err != nil { + return 0, err + } + + return id, nil +} + +// Check if there's already an active retrieval deal with the same peer +// for the same payload CID +func (c *Client) checkForActiveDeal(payloadCID cid.Cid, pid peer.ID) error { + var deals []retrievalmarket.ClientDealState + err := c.stateMachines.List(&deals) + if err != nil { + return err + } + + for _, deal := range deals { + match := deal.Sender == pid && deal.PayloadCID == payloadCID + active := !clientstates.IsFinalityState(deal.Status) + if match && active { + msg := fmt.Sprintf("there is an active retrieval deal with peer %s ", pid) + msg += fmt.Sprintf("for payload CID %s ", payloadCID) + msg += fmt.Sprintf("(retrieval deal ID %d, state %s) - ", + deal.ID, retrievalmarket.DealStatuses[deal.Status]) + msg += "existing deal must be cancelled before starting a new retrieval deal" + err := xerrors.Errorf(msg) + return err + } + } + return nil +} + +func (c *Client) notifySubscribers(eventName fsm.EventName, state fsm.StateType) { + evt := eventName.(retrievalmarket.ClientEvent) + ds := state.(retrievalmarket.ClientDealState) + _ = c.subscribers.Publish(internalEvent{evt, ds}) +} + +func (c *Client) addMultiaddrs(ctx 
context.Context, p retrievalmarket.RetrievalPeer) error { + tok, _, err := c.node.GetChainHead(ctx) + if err != nil { + return err + } + maddrs, err := c.node.GetKnownAddresses(ctx, p, tok) + if err != nil { + return err + } + if len(maddrs) > 0 { + c.network.AddAddrs(p.ID, maddrs) + } + return nil +} + +// SubscribeToEvents allows another component to listen for events on the RetrievalClient +// in order to track deals as they progress through the deal flow +func (c *Client) SubscribeToEvents(subscriber retrievalmarket.ClientSubscriber) retrievalmarket.Unsubscribe { + return retrievalmarket.Unsubscribe(c.subscribers.Subscribe(subscriber)) +} + +// V1 + +// TryRestartInsufficientFunds attempts to restart any deals stuck in the insufficient funds state +// after funds are added to a given payment channel +func (c *Client) TryRestartInsufficientFunds(paymentChannel address.Address) error { + var deals []retrievalmarket.ClientDealState + err := c.stateMachines.List(&deals) + if err != nil { + return err + } + for _, deal := range deals { + if deal.Status == retrievalmarket.DealStatusInsufficientFunds && deal.PaymentInfo.PayCh == paymentChannel { + if err := c.stateMachines.Send(deal.ID, retrievalmarket.ClientEventRecheckFunds); err != nil { + return err + } + } + } + return nil +} + +// CancelDeal attempts to cancel an in progress deal +func (c *Client) CancelDeal(dealID retrievalmarket.DealID) error { + return c.stateMachines.Send(dealID, retrievalmarket.ClientEventCancel) +} + +// GetDeal returns a given deal by deal ID, if it exists +func (c *Client) GetDeal(dealID retrievalmarket.DealID) (retrievalmarket.ClientDealState, error) { + var out retrievalmarket.ClientDealState + if err := c.stateMachines.Get(dealID).Get(&out); err != nil { + return retrievalmarket.ClientDealState{}, err + } + return out, nil +} + +// ListDeals lists all known retrieval deals +func (c *Client) ListDeals() (map[retrievalmarket.DealID]retrievalmarket.ClientDealState, error) { + var deals 
[]retrievalmarket.ClientDealState + err := c.stateMachines.List(&deals) + if err != nil { + return nil, err + } + dealMap := make(map[retrievalmarket.DealID]retrievalmarket.ClientDealState) + for _, deal := range deals { + dealMap[deal.ID] = deal + } + return dealMap, nil +} + +var _ clientstates.ClientDealEnvironment = &clientDealEnvironment{} + +type clientDealEnvironment struct { + c *Client +} + +// Node returns the node interface for this deal +func (c *clientDealEnvironment) Node() retrievalmarket.RetrievalClientNode { + return c.c.node +} + +func (c *clientDealEnvironment) OpenDataTransfer(ctx context.Context, to peer.ID, proposal *retrievalmarket.DealProposal, legacy bool) (datatransfer.ChannelID, error) { + sel := selectorparse.CommonSelector_ExploreAllRecursively + if proposal.SelectorSpecified() { + var err error + sel, err = retrievalmarket.DecodeNode(proposal.Selector) + if err != nil { + return datatransfer.ChannelID{}, xerrors.Errorf("selector is invalid: %w", err) + } + } + + var vouch datatransfer.Voucher = proposal + if legacy { + vouch = &migrations.DealProposal0{ + PayloadCID: proposal.PayloadCID, + ID: proposal.ID, + Params0: migrations.Params0{ + Selector: proposal.Selector, + PieceCID: proposal.PieceCID, + PricePerByte: proposal.PricePerByte, + PaymentInterval: proposal.PaymentInterval, + PaymentIntervalIncrease: proposal.PaymentIntervalIncrease, + UnsealPrice: proposal.UnsealPrice, + }, + } + } + return c.c.dataTransfer.OpenPullDataChannel(ctx, to, vouch, proposal.PayloadCID, sel) +} + +func (c *clientDealEnvironment) SendDataTransferVoucher(ctx context.Context, channelID datatransfer.ChannelID, payment *retrievalmarket.DealPayment, legacy bool) error { + var vouch datatransfer.Voucher = payment + if legacy { + vouch = &migrations.DealPayment0{ + ID: payment.ID, + PaymentChannel: payment.PaymentChannel, + PaymentVoucher: payment.PaymentVoucher, + } + } + return c.c.dataTransfer.SendVoucher(ctx, channelID, vouch) +} + +func (c 
*clientDealEnvironment) CloseDataTransfer(ctx context.Context, channelID datatransfer.ChannelID) error { + // When we close the data transfer, we also send a cancel message to the peer. + // Make sure we don't wait too long to send the message. + ctx, cancel := context.WithTimeout(ctx, shared.CloseDataTransferTimeout) + defer cancel() + + err := c.c.dataTransfer.CloseDataTransferChannel(ctx, channelID) + if shared.IsCtxDone(err) { + log.Warnf("failed to send cancel data transfer channel %s to provider within timeout %s", + channelID, shared.CloseDataTransferTimeout) + return nil + } + return err +} + +// FinalizeBlockstore is called when all blocks have been received +func (c *clientDealEnvironment) FinalizeBlockstore(ctx context.Context, dealID retrievalmarket.DealID) error { + return c.c.bstores.Done(dealID) +} + +type clientStoreGetter struct { + c *Client +} + +func (csg *clientStoreGetter) Get(_ peer.ID, id retrievalmarket.DealID) (bstore.Blockstore, error) { + var deal retrievalmarket.ClientDealState + err := csg.c.stateMachines.Get(id).Get(&deal) + if err != nil { + return nil, err + } + return csg.c.bstores.Get(id, deal.PayloadCID) +} + +// ClientFSMParameterSpec is a valid set of parameters for a client deal FSM - used in doc generation +var ClientFSMParameterSpec = fsm.Parameters{ + Environment: &clientDealEnvironment{}, + StateType: retrievalmarket.ClientDealState{}, + StateKeyField: "Status", + Events: clientstates.ClientEvents, + StateEntryFuncs: clientstates.ClientStateEntryFuncs, +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/client_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/client_test.go new file mode 100644 index 00000000000..f0e29037261 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/client_test.go @@ -0,0 +1,517 @@ +package retrievalimpl_test + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/ipfs/go-cid" + 
"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	dss "github.com/ipfs/go-datastore/sync"
	"github.com/ipld/go-ipld-prime/codec/dagcbor"
	selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/go-address"
	datatransfer "github.com/filecoin-project/go-data-transfer"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
	rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
	"github.com/filecoin-project/go-fil-markets/shared_testutil"
	tut "github.com/filecoin-project/go-fil-markets/shared_testutil"
)

// TestClient_Construction verifies that constructing a client registers the
// expected voucher types, voucher result types, and transport configurers
// with the data transfer manager, in order.
func TestClient_Construction(t *testing.T) {

	ds := dss.MutexWrap(datastore.NewMapDatastore())
	dt := tut.NewTestDataTransfer()
	net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{})
	ba := tut.NewTestRetrievalBlockstoreAccessor()
	_, err := retrievalimpl.NewClient(net, dt, testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}), &tut.TestPeerResolver{}, ds, ba)
	require.NoError(t, err)

	require.Len(t, dt.Subscribers, 1)
	require.Len(t, dt.RegisteredVoucherResultTypes, 2)
	_, ok := dt.RegisteredVoucherResultTypes[0].(*retrievalmarket.DealResponse)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherResultTypes[1].(*migrations.DealResponse0)
	require.True(t, ok)
	require.Len(t, dt.RegisteredVoucherTypes, 4)
	_, ok = dt.RegisteredVoucherTypes[0].VoucherType.(*retrievalmarket.DealProposal)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherTypes[1].VoucherType.(*migrations.DealProposal0)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherTypes[2].VoucherType.(*retrievalmarket.DealPayment)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherTypes[3].VoucherType.(*migrations.DealPayment0)
	require.True(t, ok)
	require.Len(t, dt.RegisteredTransportConfigurers, 2)
	_, ok = dt.RegisteredTransportConfigurers[0].VoucherType.(*retrievalmarket.DealProposal)
	// Fix: this assertion was missing, so a failed type assertion on
	// configurer 0 was silently overwritten by the next statement.
	require.True(t, ok)
	_, ok = dt.RegisteredTransportConfigurers[1].VoucherType.(*migrations.DealProposal0)
	require.True(t, ok)
}

func TestClient_Query(t *testing.T) {
	ctx := context.Background()

	ds := dss.MutexWrap(datastore.NewMapDatastore())
	dt := tut.NewTestDataTransfer()
	ba := tut.NewTestRetrievalBlockstoreAccessor()

	pcid := tut.GenerateCids(1)[0]
	expectedPeer := peer.ID("somevalue")
	rpeer := retrievalmarket.RetrievalPeer{
		Address: address.TestAddress2,
		ID:      expectedPeer,
	}

	expectedQuery := retrievalmarket.Query{
		PayloadCID: pcid,
	}

	expectedQueryResponse := retrievalmarket.QueryResponse{
		Status:                     retrievalmarket.QueryResponseAvailable,
		Size:                       1234,
		PaymentAddress:             address.TestAddress,
		MinPricePerByte:            abi.NewTokenAmount(5678),
		MaxPaymentInterval:         4321,
		MaxPaymentIntervalIncrease: 0,
	}

	t.Run("it works", func(t *testing.T) {
		var qsb tut.QueryStreamBuilder = func(p peer.ID) (rmnet.RetrievalQueryStream, error) {
			return tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{
				Writer:     tut.ExpectQueryWriter(t, expectedQuery, "queries should match"),
				RespReader: tut.StubbedQueryResponseReader(expectedQueryResponse),
			}), nil
		}
		net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{
			QueryStreamBuilder: tut.ExpectPeerOnQueryStreamBuilder(t, expectedPeer, qsb, "Peers should match"),
		})
		node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{})
node.ExpectKnownAddresses(rpeer, nil)
		c, err := retrievalimpl.NewClient(net, dt, node, &tut.TestPeerResolver{}, ds, ba)
		require.NoError(t, err)

		resp, err := c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{})
		require.NoError(t, err)
		assert.NotNil(t, resp)
		assert.Equal(t, expectedQueryResponse, resp)
		node.VerifyExpectations(t)
	})

	t.Run("when the stream returns error, returns error", func(t *testing.T) {
		net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{
			QueryStreamBuilder: tut.FailNewQueryStream,
		})
		node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{})
		node.ExpectKnownAddresses(rpeer, nil)
		c, err := retrievalimpl.NewClient(net, dt, node, &tut.TestPeerResolver{}, ds, ba)
		require.NoError(t, err)

		_, err = c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{})
		assert.EqualError(t, err, "new query stream failed")
		node.VerifyExpectations(t)
	})

	t.Run("when WriteDealStatusRequest fails, returns error", func(t *testing.T) {

		// Stub a stream whose write side always fails.
		qsbuilder := func(p peer.ID) (network.RetrievalQueryStream, error) {
			newStream := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{
				PeerID: p,
				Writer: tut.FailQueryWriter,
			})
			return newStream, nil
		}

		net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{
			QueryStreamBuilder: qsbuilder,
		})
		node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{})
		node.ExpectKnownAddresses(rpeer, nil)
		c, err := retrievalimpl.NewClient(net, dt, node, &tut.TestPeerResolver{}, ds, ba)
		require.NoError(t, err)

		statusCode, err := c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{})
		assert.EqualError(t, err, "write query failed")
		assert.Equal(t, retrievalmarket.QueryResponseUndefined, statusCode)
		node.VerifyExpectations(t)
	})

	t.Run("when ReadDealStatusResponse fails, returns error", func(t *testing.T) {
		// Stub a stream whose response-read side always fails.
		qsbuilder := func(p peer.ID) (network.RetrievalQueryStream, error) {
			newStream := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{
				PeerID:     p,
				RespReader: tut.FailResponseReader,
			})
			return newStream, nil
		}
		net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{
			QueryStreamBuilder: qsbuilder,
		})
		node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{})
		node.ExpectKnownAddresses(rpeer, nil)
		c, err := retrievalimpl.NewClient(net, dt, node, &tut.TestPeerResolver{}, ds, ba)
		require.NoError(t, err)

		statusCode, err := c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{})
		assert.EqualError(t, err, "query response failed")
		assert.Equal(t, retrievalmarket.QueryResponseUndefined, statusCode)
		node.VerifyExpectations(t)
	})
}

func TestClient_FindProviders(t *testing.T) {
	ds := dss.MutexWrap(datastore.NewMapDatastore())
	dt := tut.NewTestDataTransfer()
	ba := tut.NewTestRetrievalBlockstoreAccessor()
	expectedPeer := peer.ID("somevalue")

	var qsb tut.QueryStreamBuilder = func(p peer.ID) (rmnet.RetrievalQueryStream, error) {
		return tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{
			Writer:     tut.TrivialQueryWriter,
			RespReader: tut.TrivialQueryResponseReader,
		}), nil
	}
	net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{
		QueryStreamBuilder: tut.ExpectPeerOnQueryStreamBuilder(t, expectedPeer, qsb, "Peers should match"),
	})

	t.Run("when providers are found, returns providers", func(t *testing.T) {
		peers := tut.RequireGenerateRetrievalPeers(t, 3)
		testResolver := tut.TestPeerResolver{Peers: peers}

		c, err := retrievalimpl.NewClient(net, dt, &testnodes.TestRetrievalClientNode{}, &testResolver, ds, ba)
		require.NoError(t, err)

		testCid := tut.GenerateCids(1)[0]
		assert.Len(t, c.FindProviders(testCid), 3)
	})

	t.Run("when there is an error, returns empty provider list", func(t *testing.T) {
		testResolver := tut.TestPeerResolver{Peers: []retrievalmarket.RetrievalPeer{}, ResolverError: errors.New("boom")}
		c, err := retrievalimpl.NewClient(net, dt, &testnodes.TestRetrievalClientNode{}, &testResolver, ds, ba)
		require.NoError(t, err)

		badCid := tut.GenerateCids(1)[0]
		assert.Len(t, c.FindProviders(badCid), 0)
	})

	t.Run("when there are no providers", func(t *testing.T) {
		testResolver := tut.TestPeerResolver{Peers: []retrievalmarket.RetrievalPeer{}}
		c, err := retrievalimpl.NewClient(net, dt, &testnodes.TestRetrievalClientNode{}, &testResolver, ds, ba)
		require.NoError(t, err)

		testCid := tut.GenerateCids(1)[0]
		assert.Len(t, c.FindProviders(testCid), 0)
	})
}

// TestClient_DuplicateRetrieve verifies that it's not possible to make a
// retrieval deal for the same payload CID with the same peer as an existing
// active deal
func TestClient_DuplicateRetrieve(t *testing.T) {
	t.Skip("flaky test")
	bgCtx := context.Background()
	ctx, cancel := context.WithCancel(bgCtx)
	defer cancel()
	payChAddr := address.TestAddress

	payloadCIDs := tut.GenerateCids(2)
	rpeer1 := retrievalmarket.RetrievalPeer{
		Address: address.TestAddress2,
		ID:      peer.ID("p1"),
	}
	rpeer2 := retrievalmarket.RetrievalPeer{
		Address: address.TestAddress2,
		ID:      peer.ID("p2"),
	}

	testCases := []struct {
		name        string
		payloadCid1 cid.Cid
		payloadCid2 cid.Cid
		rpeer1      retrievalmarket.RetrievalPeer
		rpeer2      retrievalmarket.RetrievalPeer
		expectError bool
		cancelled   bool
	}{{
		name:        "different payload CID",
		payloadCid1: payloadCIDs[0],
		payloadCid2: payloadCIDs[1],
		rpeer1:      rpeer1,
		rpeer2:      rpeer1,
	}, {
		name:        "different peer",
		payloadCid1: payloadCIDs[0],
		payloadCid2: payloadCIDs[0],
		rpeer1:      rpeer1,
		rpeer2:      rpeer2,
	}, {
		name:        "same peer and payload CID",
		payloadCid1: payloadCIDs[0],
		payloadCid2: payloadCIDs[0],
		rpeer1:      rpeer1,
		rpeer2:      rpeer1,
		expectError: true,
	}, {
		name:        "same peer and payload CID as cancelled deal",
		payloadCid1: payloadCIDs[0],
		payloadCid2: payloadCIDs[0],
		rpeer1:      rpeer1,
		rpeer2:      rpeer1,
		cancelled:
true, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Set up a retrieval client node with mocks + ds := dss.MutexWrap(datastore.NewMapDatastore()) + dt := tut.NewTestDataTransfer() + ba := tut.NewTestRetrievalBlockstoreAccessor() + net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}) + node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) + node.ExpectKnownAddresses(tc.rpeer1, nil) + node.ExpectKnownAddresses(tc.rpeer2, nil) + + // Create the client + client, err := retrievalimpl.NewClient(net, dt, node, &tut.TestPeerResolver{}, ds, ba) + require.NoError(t, err) + + // Start the client and wait till it's ready + err = client.Start(ctx) + require.NoError(t, err) + + ready := make(chan struct{}) + go func() { + client.OnReady(func(err error) { + close(ready) + }) + }() + select { + case <-ready: + case <-time.After(100 * time.Millisecond): + } + + // Retrieve first payload CID from first peer + params := retrievalmarket.Params{ + Selector: nil, + PieceCID: &tut.GenerateCids(1)[0], + PricePerByte: abi.NewTokenAmount(1), + PaymentInterval: 1, + PaymentIntervalIncrease: 0, + UnsealPrice: abi.NewTokenAmount(0), + } + + dealID, err := client.Retrieve(ctx, 0, tc.payloadCid1, params, abi.NewTokenAmount(10), tc.rpeer1, payChAddr, tc.rpeer1.Address) + assert.NoError(t, err) + + // If the deal should be cancelled + if tc.cancelled { + done := make(chan struct{}) + go func() { + client.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) { + if state.Status == retrievalmarket.DealStatusCancelled { + close(done) + } + }) + }() + + // Cancel deal and wait for it to complete cancelling + err = client.CancelDeal(dealID) + require.NoError(t, err) + + select { + case <-done: + case <-time.After(500 * time.Millisecond): + } + } + + // Retrieve second payload CID from second peer + _, err = client.Retrieve(ctx, 0, tc.payloadCid2, params, abi.NewTokenAmount(10), 
tc.rpeer2, payChAddr, tc.rpeer2.Address) + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestMigrations(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + ds := dss.MutexWrap(datastore.NewMapDatastore()) + dt := tut.NewTestDataTransfer() + net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}) + retrievalDs := namespace.Wrap(ds, datastore.NewKey("/retrievals/client")) + ba := tut.NewTestRetrievalBlockstoreAccessor() + + numDeals := 5 + payloadCIDs := make([]cid.Cid, numDeals) + iDs := make([]retrievalmarket.DealID, numDeals) + pieceCIDs := make([]*cid.Cid, numDeals) + pricePerBytes := make([]abi.TokenAmount, numDeals) + paymentIntervals := make([]uint64, numDeals) + paymentIntervalIncreases := make([]uint64, numDeals) + unsealPrices := make([]abi.TokenAmount, numDeals) + storeIDs := make([]*uint64, numDeals) + channelIDs := make([]datatransfer.ChannelID, numDeals) + lastPaymentRequesteds := make([]bool, numDeals) + allBlocksReceiveds := make([]bool, numDeals) + totalFundss := make([]abi.TokenAmount, numDeals) + lanes := make([]uint64, numDeals) + senders := make([]peer.ID, numDeals) + totalReceiveds := make([]uint64, numDeals) + messages := make([]string, numDeals) + bytesPaidFors := make([]uint64, numDeals) + currentIntervals := make([]uint64, numDeals) + paymentRequesteds := make([]abi.TokenAmount, numDeals) + fundsSpents := make([]abi.TokenAmount, numDeals) + unsealFundsPaids := make([]abi.TokenAmount, numDeals) + voucherShortfalls := make([]abi.TokenAmount, numDeals) + selfPeer := tut.GeneratePeers(1)[0] + + allSelectorBuf := new(bytes.Buffer) + err := dagcbor.Encode(selectorparse.CommonSelector_ExploreAllRecursively, allSelectorBuf) + require.NoError(t, err) + allSelectorBytes := allSelectorBuf.Bytes() + + for i := 0; i < numDeals; i++ { + payloadCIDs[i] = tut.GenerateCids(1)[0] + iDs[i] = 
retrievalmarket.DealID(rand.Uint64()) + pieceCID := tut.GenerateCids(1)[0] + pieceCIDs[i] = &pieceCID + pricePerBytes[i] = big.NewInt(rand.Int63()) + paymentIntervals[i] = rand.Uint64() + paymentIntervalIncreases[i] = rand.Uint64() + unsealPrices[i] = big.NewInt(rand.Int63()) + storeID := rand.Uint64() + storeIDs[i] = &storeID + senders[i] = tut.GeneratePeers(1)[0] + channelIDs[i] = datatransfer.ChannelID{ + Initiator: selfPeer, + Responder: senders[i], + ID: datatransfer.TransferID(rand.Uint64()), + } + lastPaymentRequesteds[i] = rand.Intn(2) == 1 + allBlocksReceiveds[i] = rand.Intn(2) == 1 + totalFundss[i] = big.NewInt(rand.Int63()) + lanes[i] = rand.Uint64() + totalReceiveds[i] = rand.Uint64() + messages[i] = string(tut.RandomBytes(20)) + bytesPaidFors[i] = rand.Uint64() + currentIntervals[i] = rand.Uint64() + paymentRequesteds[i] = big.NewInt(rand.Int63()) + fundsSpents[i] = big.NewInt(rand.Int63()) + unsealFundsPaids[i] = big.NewInt(rand.Int63()) + voucherShortfalls[i] = big.NewInt(rand.Int63()) + deal := migrations.ClientDealState0{ + DealProposal0: migrations.DealProposal0{ + PayloadCID: payloadCIDs[i], + ID: iDs[i], + Params0: migrations.Params0{ + Selector: &cbg.Deferred{ + Raw: allSelectorBytes, + }, + PieceCID: pieceCIDs[i], + PricePerByte: pricePerBytes[i], + PaymentInterval: paymentIntervals[i], + PaymentIntervalIncrease: paymentIntervalIncreases[i], + UnsealPrice: unsealPrices[i], + }, + }, + StoreID: storeIDs[i], + ChannelID: channelIDs[i], + LastPaymentRequested: lastPaymentRequesteds[i], + AllBlocksReceived: allBlocksReceiveds[i], + TotalFunds: totalFundss[i], + ClientWallet: address.TestAddress, + MinerWallet: address.TestAddress2, + PaymentInfo: &migrations.PaymentInfo0{ + PayCh: address.TestAddress, + Lane: lanes[i], + }, + Status: retrievalmarket.DealStatusCompleted, + Sender: senders[i], + TotalReceived: totalReceiveds[i], + Message: messages[i], + BytesPaidFor: bytesPaidFors[i], + CurrentInterval: currentIntervals[i], + PaymentRequested: 
paymentRequesteds[i], + FundsSpent: fundsSpents[i], + UnsealFundsPaid: unsealFundsPaids[i], + WaitMsgCID: nil, + VoucherShortfall: voucherShortfalls[i], + } + buf := new(bytes.Buffer) + err := deal.MarshalCBOR(buf) + require.NoError(t, err) + err = retrievalDs.Put(ctx, datastore.NewKey(fmt.Sprint(deal.ID)), buf.Bytes()) + require.NoError(t, err) + } + retrievalClient, err := retrievalimpl.NewClient(net, dt, testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}), &tut.TestPeerResolver{}, retrievalDs, ba) + require.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, retrievalClient) + deals, err := retrievalClient.ListDeals() + require.NoError(t, err) + for i := 0; i < numDeals; i++ { + deal, ok := deals[iDs[i]] + require.True(t, ok) + expectedDeal := retrievalmarket.ClientDealState{ + DealProposal: retrievalmarket.DealProposal{ + PayloadCID: payloadCIDs[i], + ID: iDs[i], + Params: retrievalmarket.Params{ + Selector: &cbg.Deferred{ + Raw: allSelectorBytes, + }, + PieceCID: pieceCIDs[i], + PricePerByte: pricePerBytes[i], + PaymentInterval: paymentIntervals[i], + PaymentIntervalIncrease: paymentIntervalIncreases[i], + UnsealPrice: unsealPrices[i], + }, + }, + StoreID: storeIDs[i], + ChannelID: &channelIDs[i], + LastPaymentRequested: lastPaymentRequesteds[i], + AllBlocksReceived: allBlocksReceiveds[i], + TotalFunds: totalFundss[i], + ClientWallet: address.TestAddress, + MinerWallet: address.TestAddress2, + PaymentInfo: &retrievalmarket.PaymentInfo{ + PayCh: address.TestAddress, + Lane: lanes[i], + }, + Status: retrievalmarket.DealStatusCompleted, + Sender: senders[i], + TotalReceived: totalReceiveds[i], + Message: messages[i], + BytesPaidFor: bytesPaidFors[i], + CurrentInterval: currentIntervals[i], + PaymentRequested: paymentRequesteds[i], + FundsSpent: fundsSpents[i], + UnsealFundsPaid: unsealFundsPaids[i], + WaitMsgCID: nil, + VoucherShortfall: voucherShortfalls[i], + LegacyProtocol: true, + } + require.Equal(t, expectedDeal, deal) 
+ } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_fsm.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_fsm.go new file mode 100644 index 00000000000..1eb1ba9de92 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_fsm.go @@ -0,0 +1,429 @@ +package clientstates + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +func recordReceived(deal *rm.ClientDealState, totalReceived uint64) error { + deal.TotalReceived = totalReceived + return nil +} + +var paymentChannelCreationStates = []fsm.StateKey{ + rm.DealStatusWaitForAcceptance, + rm.DealStatusWaitForAcceptanceLegacy, + rm.DealStatusAccepted, + rm.DealStatusPaymentChannelCreating, + rm.DealStatusPaymentChannelAddingInitialFunds, + rm.DealStatusPaymentChannelAllocatingLane, +} + +// ClientEvents are the events that can happen in a retrieval client +var ClientEvents = fsm.Events{ + fsm.Event(rm.ClientEventOpen). + From(rm.DealStatusNew).ToNoChange(), + + // ProposeDeal handler events + fsm.Event(rm.ClientEventWriteDealProposalErrored). + FromAny().To(rm.DealStatusErroring). + Action(func(deal *rm.ClientDealState, err error) error { + deal.Message = xerrors.Errorf("proposing deal: %w", err).Error() + return nil + }), + fsm.Event(rm.ClientEventDealProposed). + From(rm.DealStatusNew).To(rm.DealStatusWaitForAcceptance). + From(rm.DealStatusRetryLegacy).To(rm.DealStatusWaitForAcceptanceLegacy). + From(rm.DealStatusCancelling).ToJustRecord(). 
+ Action(func(deal *rm.ClientDealState, channelID datatransfer.ChannelID) error { + deal.ChannelID = &channelID + deal.Message = "" + return nil + }), + + // Initial deal acceptance events + fsm.Event(rm.ClientEventDealRejected). + From(rm.DealStatusWaitForAcceptance).To(rm.DealStatusRetryLegacy). + From(rm.DealStatusWaitForAcceptanceLegacy).To(rm.DealStatusRejecting). + Action(func(deal *rm.ClientDealState, message string) error { + deal.Message = fmt.Sprintf("deal rejected: %s", message) + deal.LegacyProtocol = true + return nil + }), + fsm.Event(rm.ClientEventDealNotFound). + FromMany(rm.DealStatusWaitForAcceptance, rm.DealStatusWaitForAcceptanceLegacy).To(rm.DealStatusDealNotFoundCleanup). + Action(func(deal *rm.ClientDealState, message string) error { + deal.Message = fmt.Sprintf("deal not found: %s", message) + return nil + }), + fsm.Event(rm.ClientEventDealAccepted). + FromMany(rm.DealStatusWaitForAcceptance, rm.DealStatusWaitForAcceptanceLegacy).To(rm.DealStatusAccepted), + fsm.Event(rm.ClientEventUnknownResponseReceived). + FromAny().To(rm.DealStatusFailing). + Action(func(deal *rm.ClientDealState, status rm.DealStatus) error { + deal.Message = fmt.Sprintf("Unexpected deal response status: %s", rm.DealStatuses[status]) + return nil + }), + + // Payment channel setup + fsm.Event(rm.ClientEventPaymentChannelErrored). + FromMany(rm.DealStatusAccepted, rm.DealStatusPaymentChannelCreating, rm.DealStatusPaymentChannelAddingFunds).To(rm.DealStatusFailing). + Action(func(deal *rm.ClientDealState, err error) error { + deal.Message = xerrors.Errorf("error from payment channel: %w", err).Error() + return nil + }), + + // Price of deal is zero so skip creating a payment channel + fsm.Event(rm.ClientEventPaymentChannelSkip). + From(rm.DealStatusAccepted).To(rm.DealStatusOngoing), + + fsm.Event(rm.ClientEventPaymentChannelCreateInitiated). + From(rm.DealStatusAccepted).To(rm.DealStatusPaymentChannelCreating). 
+ Action(func(deal *rm.ClientDealState, msgCID cid.Cid) error { + deal.WaitMsgCID = &msgCID + return nil + }), + + // Client is adding funds to payment channel + fsm.Event(rm.ClientEventPaymentChannelAddingFunds). + // If the deal has just been accepted, we are adding the initial funds + // to the payment channel + FromMany(rm.DealStatusAccepted).To(rm.DealStatusPaymentChannelAddingInitialFunds). + // If the deal was already ongoing, and ran out of funds, we are + // topping up funds in the payment channel + FromMany(rm.DealStatusCheckFunds).To(rm.DealStatusPaymentChannelAddingFunds). + Action(func(deal *rm.ClientDealState, msgCID cid.Cid, payCh address.Address) error { + deal.WaitMsgCID = &msgCID + if deal.PaymentInfo == nil { + deal.PaymentInfo = &rm.PaymentInfo{ + PayCh: payCh, + } + } + return nil + }), + + // The payment channel add funds message has landed on chain + fsm.Event(rm.ClientEventPaymentChannelReady). + // If the payment channel between client and provider was being created + // for the first time, or if the payment channel had already been + // created for an earlier deal but the initial funding for this deal + // was being added, then we still need to allocate a payment channel + // lane + FromMany(rm.DealStatusPaymentChannelCreating, rm.DealStatusPaymentChannelAddingInitialFunds, rm.DealStatusAccepted).To(rm.DealStatusPaymentChannelAllocatingLane). + // If the payment channel ran out of funds and needed to be topped up, + // then the payment channel lane already exists so just move straight + // to the ongoing state + From(rm.DealStatusPaymentChannelAddingFunds).To(rm.DealStatusOngoing). + From(rm.DealStatusCheckFunds).To(rm.DealStatusOngoing). 
+ Action(func(deal *rm.ClientDealState, payCh address.Address) error { + if deal.PaymentInfo == nil { + deal.PaymentInfo = &rm.PaymentInfo{ + PayCh: payCh, + } + } + deal.WaitMsgCID = nil + // remove any insufficient funds message + deal.Message = "" + return nil + }), + + fsm.Event(rm.ClientEventAllocateLaneErrored). + FromMany(rm.DealStatusPaymentChannelAllocatingLane). + To(rm.DealStatusFailing). + Action(func(deal *rm.ClientDealState, err error) error { + deal.Message = xerrors.Errorf("allocating payment lane: %w", err).Error() + return nil + }), + + fsm.Event(rm.ClientEventLaneAllocated). + From(rm.DealStatusPaymentChannelAllocatingLane).To(rm.DealStatusOngoing). + Action(func(deal *rm.ClientDealState, lane uint64) error { + deal.PaymentInfo.Lane = lane + return nil + }), + + // Transfer Channel Errors + fsm.Event(rm.ClientEventDataTransferError). + FromAny().To(rm.DealStatusErroring). + Action(func(deal *rm.ClientDealState, err error) error { + deal.Message = fmt.Sprintf("error generated by data transfer: %s", err.Error()) + return nil + }), + + // Receiving requests for payment + fsm.Event(rm.ClientEventLastPaymentRequested). + FromMany( + rm.DealStatusOngoing, + rm.DealStatusFundsNeededLastPayment, + rm.DealStatusFundsNeeded).To(rm.DealStatusFundsNeededLastPayment). + From(rm.DealStatusSendFunds).To(rm.DealStatusOngoing). + From(rm.DealStatusCheckComplete).ToNoChange(). + From(rm.DealStatusBlocksComplete).To(rm.DealStatusSendFundsLastPayment). + FromMany( + paymentChannelCreationStates...).ToJustRecord(). + Action(func(deal *rm.ClientDealState, paymentOwed abi.TokenAmount) error { + deal.PaymentRequested = big.Add(deal.PaymentRequested, paymentOwed) + deal.LastPaymentRequested = true + return nil + }), + fsm.Event(rm.ClientEventPaymentRequested). + FromMany( + rm.DealStatusOngoing, + rm.DealStatusBlocksComplete, + rm.DealStatusFundsNeeded, + rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFundsNeeded). 
+ From(rm.DealStatusSendFunds).To(rm.DealStatusOngoing). + From(rm.DealStatusCheckComplete).ToNoChange(). + FromMany( + paymentChannelCreationStates...).ToJustRecord(). + Action(func(deal *rm.ClientDealState, paymentOwed abi.TokenAmount) error { + deal.PaymentRequested = big.Add(deal.PaymentRequested, paymentOwed) + return nil + }), + + fsm.Event(rm.ClientEventUnsealPaymentRequested). + FromMany(rm.DealStatusWaitForAcceptance, rm.DealStatusWaitForAcceptanceLegacy).To(rm.DealStatusAccepted). + Action(func(deal *rm.ClientDealState, paymentOwed abi.TokenAmount) error { + deal.PaymentRequested = big.Add(deal.PaymentRequested, paymentOwed) + return nil + }), + + // Receiving data + fsm.Event(rm.ClientEventAllBlocksReceived). + FromMany( + rm.DealStatusOngoing, + rm.DealStatusBlocksComplete, + ).To(rm.DealStatusBlocksComplete). + FromMany(paymentChannelCreationStates...).ToJustRecord(). + FromMany(rm.DealStatusSendFunds, rm.DealStatusSendFundsLastPayment).To(rm.DealStatusOngoing). + From(rm.DealStatusFundsNeeded).ToNoChange(). + From(rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusSendFundsLastPayment). + From(rm.DealStatusClientWaitingForLastBlocks).To(rm.DealStatusFinalizingBlockstore). + From(rm.DealStatusCheckComplete).To(rm.DealStatusFinalizingBlockstore). + Action(func(deal *rm.ClientDealState) error { + deal.AllBlocksReceived = true + return nil + }), + fsm.Event(rm.ClientEventBlocksReceived). + FromMany(rm.DealStatusOngoing, + rm.DealStatusFundsNeeded, + rm.DealStatusFundsNeededLastPayment, + rm.DealStatusCheckComplete, + rm.DealStatusClientWaitingForLastBlocks).ToNoChange(). + FromMany(rm.DealStatusSendFunds, rm.DealStatusSendFundsLastPayment).To(rm.DealStatusOngoing). + FromMany(paymentChannelCreationStates...).ToJustRecord(). + Action(recordReceived), + + fsm.Event(rm.ClientEventSendFunds). + FromMany(rm.DealStatusSendFunds, rm.DealStatusSendFundsLastPayment).To(rm.DealStatusOngoing). + From(rm.DealStatusFundsNeeded).To(rm.DealStatusSendFunds). 
+ From(rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusSendFundsLastPayment), + + // Sending Payments + fsm.Event(rm.ClientEventFundsExpended). + FromMany(rm.DealStatusCheckFunds).To(rm.DealStatusInsufficientFunds). + Action(func(deal *rm.ClientDealState, shortfall abi.TokenAmount) error { + deal.Message = fmt.Sprintf("not enough current or pending funds in payment channel, shortfall of %s", shortfall.String()) + return nil + }), + fsm.Event(rm.ClientEventBadPaymentRequested). + FromMany(rm.DealStatusSendFunds, rm.DealStatusSendFundsLastPayment).To(rm.DealStatusFailing). + Action(func(deal *rm.ClientDealState, message string) error { + deal.Message = message + return nil + }), + fsm.Event(rm.ClientEventCreateVoucherFailed). + FromMany(rm.DealStatusSendFunds, rm.DealStatusSendFundsLastPayment).To(rm.DealStatusFailing). + Action(func(deal *rm.ClientDealState, err error) error { + deal.Message = xerrors.Errorf("creating payment voucher: %w", err).Error() + return nil + }), + fsm.Event(rm.ClientEventVoucherShortfall). + FromMany(rm.DealStatusSendFunds, rm.DealStatusSendFundsLastPayment).To(rm.DealStatusCheckFunds). + Action(func(deal *rm.ClientDealState, shortfall abi.TokenAmount) error { + return nil + }), + + fsm.Event(rm.ClientEventWriteDealPaymentErrored). + FromAny().To(rm.DealStatusErroring). + Action(func(deal *rm.ClientDealState, err error) error { + deal.Message = xerrors.Errorf("writing deal payment: %w", err).Error() + return nil + }), + + // Payment was requested, but there was not actually any payment due, so + // no payment voucher was actually sent + fsm.Event(rm.ClientEventPaymentNotSent). + From(rm.DealStatusOngoing).ToJustRecord(). + From(rm.DealStatusSendFunds).To(rm.DealStatusOngoing). + From(rm.DealStatusSendFundsLastPayment).To(rm.DealStatusFinalizing), + + fsm.Event(rm.ClientEventPaymentSent). + From(rm.DealStatusOngoing).ToJustRecord(). + From(rm.DealStatusBlocksComplete).To(rm.DealStatusCheckComplete). 
+ From(rm.DealStatusCheckComplete).ToNoChange(). + FromMany( + rm.DealStatusFundsNeeded, + rm.DealStatusFundsNeededLastPayment, + rm.DealStatusSendFunds).To(rm.DealStatusOngoing). + From(rm.DealStatusSendFundsLastPayment).To(rm.DealStatusFinalizing). + Action(func(deal *rm.ClientDealState, voucherAmt abi.TokenAmount) error { + // Reduce the payment requested by the amount of funds sent. + // Note that it may not be reduced to zero, if a new payment + // request came in while this one was being processed. + sentAmt := big.Sub(voucherAmt, deal.FundsSpent) + deal.PaymentRequested = big.Sub(deal.PaymentRequested, sentAmt) + + // Update the total funds sent to the provider + deal.FundsSpent = voucherAmt + + // If the unseal price hasn't yet been met, set the unseal funds + // paid to the amount sent to the provider + if deal.UnsealPrice.GreaterThanEqual(deal.FundsSpent) { + deal.UnsealFundsPaid = deal.FundsSpent + return nil + } + // The unseal funds have been fully paid + deal.UnsealFundsPaid = deal.UnsealPrice + + // If the price per byte is zero, no further accounting needed + if deal.PricePerByte.IsZero() { + return nil + } + + // Calculate the amount spent on transferring data, and update the + // bytes paid for accordingly + paidSoFarForTransfer := big.Sub(deal.FundsSpent, deal.UnsealFundsPaid) + deal.BytesPaidFor = big.Div(paidSoFarForTransfer, deal.PricePerByte).Uint64() + + // If the number of bytes paid for is above the current interval, + // increase the interval + if deal.BytesPaidFor >= deal.CurrentInterval { + deal.CurrentInterval = deal.NextInterval() + } + + return nil + }), + + // completing deals + fsm.Event(rm.ClientEventComplete). + FromMany( + rm.DealStatusSendFunds, + rm.DealStatusSendFundsLastPayment, + rm.DealStatusFundsNeeded, + rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusCheckComplete). + From(rm.DealStatusOngoing).To(rm.DealStatusCheckComplete). + From(rm.DealStatusBlocksComplete).To(rm.DealStatusCheckComplete). 
+ From(rm.DealStatusFinalizing).To(rm.DealStatusFinalizingBlockstore), + fsm.Event(rm.ClientEventCompleteVerified). + From(rm.DealStatusCheckComplete).To(rm.DealStatusFinalizingBlockstore), + fsm.Event(rm.ClientEventEarlyTermination). + From(rm.DealStatusCheckComplete).To(rm.DealStatusErroring). + Action(func(deal *rm.ClientDealState) error { + deal.Message = "Provider sent complete status without sending all data" + return nil + }), + + // the provider indicated that all blocks have been sent, so the client + // should wait for the last blocks to arrive (only needed when price + // per byte is zero) + fsm.Event(rm.ClientEventWaitForLastBlocks). + From(rm.DealStatusCheckComplete).To(rm.DealStatusClientWaitingForLastBlocks). + FromMany(rm.DealStatusFinalizingBlockstore, rm.DealStatusCompleted).ToJustRecord(), + + // Once all blocks have been received and the blockstore has been finalized, + // move to the complete state + fsm.Event(rm.ClientEventBlockstoreFinalized). + From(rm.DealStatusFinalizingBlockstore).To(rm.DealStatusCompleted). + From(rm.DealStatusErroring).To(rm.DealStatusErrored). + From(rm.DealStatusRejecting).To(rm.DealStatusRejected). + From(rm.DealStatusDealNotFoundCleanup).To(rm.DealStatusDealNotFound), + + // An error occurred when finalizing the blockstore + fsm.Event(rm.ClientEventFinalizeBlockstoreErrored). + From(rm.DealStatusFinalizingBlockstore).To(rm.DealStatusErrored). + Action(func(deal *rm.ClientDealState, err error) error { + deal.Message = xerrors.Errorf("finalizing blockstore: %w", err).Error() + return nil + }), + + // after cancelling a deal is complete + fsm.Event(rm.ClientEventCancelComplete). + From(rm.DealStatusFailing).To(rm.DealStatusErrored). + From(rm.DealStatusCancelling).To(rm.DealStatusCancelled), + + // receiving a cancel indicating most likely that the provider experienced something wrong on their + // end, unless we are already failing or cancelling + fsm.Event(rm.ClientEventProviderCancelled). 
+ From(rm.DealStatusFailing).ToJustRecord(). + From(rm.DealStatusCancelling).ToJustRecord(). + FromAny().To(rm.DealStatusCancelling).Action( + func(deal *rm.ClientDealState) error { + if deal.Status != rm.DealStatusFailing && deal.Status != rm.DealStatusCancelling { + deal.Message = "Provider cancelled retrieval" + } + return nil + }, + ), + + // user manually cancels retrieval + fsm.Event(rm.ClientEventCancel).FromAny().To(rm.DealStatusCancelling).Action(func(deal *rm.ClientDealState) error { + deal.Message = "Client cancelled retrieval" + return nil + }), + + // payment channel receives more money, we believe there may be reason to recheck the funds for this channel + fsm.Event(rm.ClientEventRecheckFunds).From(rm.DealStatusInsufficientFunds).To(rm.DealStatusCheckFunds), +} + +// ClientFinalityStates are terminal states after which no further events are received +var ClientFinalityStates = []fsm.StateKey{ + rm.DealStatusErrored, + rm.DealStatusCompleted, + rm.DealStatusCancelled, + rm.DealStatusRejected, + rm.DealStatusDealNotFound, +} + +func IsFinalityState(st fsm.StateKey) bool { + for _, state := range ClientFinalityStates { + if st == state { + return true + } + } + return false +} + +// ClientStateEntryFuncs are the handlers for different states in a retrieval client +var ClientStateEntryFuncs = fsm.StateEntryFuncs{ + rm.DealStatusNew: ProposeDeal, + rm.DealStatusRetryLegacy: ProposeDeal, + rm.DealStatusAccepted: SetupPaymentChannelStart, + rm.DealStatusPaymentChannelCreating: WaitPaymentChannelReady, + rm.DealStatusPaymentChannelAddingInitialFunds: WaitPaymentChannelReady, + rm.DealStatusPaymentChannelAllocatingLane: AllocateLane, + rm.DealStatusOngoing: Ongoing, + rm.DealStatusFundsNeeded: ProcessPaymentRequested, + rm.DealStatusFundsNeededLastPayment: ProcessPaymentRequested, + rm.DealStatusSendFunds: SendFunds, + rm.DealStatusSendFundsLastPayment: SendFunds, + rm.DealStatusCheckFunds: CheckFunds, + rm.DealStatusPaymentChannelAddingFunds: 
WaitPaymentChannelReady, + rm.DealStatusFailing: CancelDeal, + rm.DealStatusCancelling: CancelDeal, + rm.DealStatusCheckComplete: CheckComplete, + rm.DealStatusFinalizingBlockstore: FinalizeBlockstore, + rm.DealStatusErroring: FailsafeFinalizeBlockstore, + rm.DealStatusRejecting: FailsafeFinalizeBlockstore, + rm.DealStatusDealNotFoundCleanup: FailsafeFinalizeBlockstore, +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_states.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_states.go new file mode 100644 index 00000000000..4d55d7f5ffd --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_states.go @@ -0,0 +1,291 @@ +package clientstates + +import ( + "context" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + peer "github.com/libp2p/go-libp2p-core/peer" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +var log = logging.Logger("markets-rtvl") + +// ClientDealEnvironment is a bridge to the environment a client deal is executing in. 
+// It provides access to relevant functionality on the retrieval client +type ClientDealEnvironment interface { + // Node returns the node interface for this deal + Node() rm.RetrievalClientNode + OpenDataTransfer(ctx context.Context, to peer.ID, proposal *rm.DealProposal, legacy bool) (datatransfer.ChannelID, error) + SendDataTransferVoucher(context.Context, datatransfer.ChannelID, *rm.DealPayment, bool) error + CloseDataTransfer(context.Context, datatransfer.ChannelID) error + FinalizeBlockstore(context.Context, rm.DealID) error +} + +// ProposeDeal sends the proposal to the other party +func ProposeDeal(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + legacy := deal.Status == rm.DealStatusRetryLegacy + channelID, err := environment.OpenDataTransfer(ctx.Context(), deal.Sender, &deal.DealProposal, legacy) + if err != nil { + return ctx.Trigger(rm.ClientEventWriteDealProposalErrored, err) + } + return ctx.Trigger(rm.ClientEventDealProposed, channelID) +} + +// SetupPaymentChannelStart initiates setting up a payment channel for a deal +func SetupPaymentChannelStart(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + // If the total funds required for the deal are zero, skip creating the payment channel + if deal.TotalFunds.IsZero() { + return ctx.Trigger(rm.ClientEventPaymentChannelSkip) + } + + tok, _, err := environment.Node().GetChainHead(ctx.Context()) + if err != nil { + return ctx.Trigger(rm.ClientEventPaymentChannelErrored, err) + } + + paych, msgCID, err := environment.Node().GetOrCreatePaymentChannel(ctx.Context(), deal.ClientWallet, deal.MinerWallet, deal.TotalFunds, tok) + if err != nil { + return ctx.Trigger(rm.ClientEventPaymentChannelErrored, err) + } + + if paych == address.Undef { + return ctx.Trigger(rm.ClientEventPaymentChannelCreateInitiated, msgCID) + } + + if msgCID == cid.Undef { + return ctx.Trigger(rm.ClientEventPaymentChannelReady, paych) + } + return 
ctx.Trigger(rm.ClientEventPaymentChannelAddingFunds, msgCID, paych) +} + +// WaitPaymentChannelReady waits for a pending operation on a payment channel -- either creating or depositing funds +func WaitPaymentChannelReady(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + paych, err := environment.Node().WaitForPaymentChannelReady(ctx.Context(), *deal.WaitMsgCID) + if err != nil { + return ctx.Trigger(rm.ClientEventPaymentChannelErrored, err) + } + return ctx.Trigger(rm.ClientEventPaymentChannelReady, paych) +} + +// AllocateLane allocates a lane for this retrieval operation +func AllocateLane(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + lane, err := environment.Node().AllocateLane(ctx.Context(), deal.PaymentInfo.PayCh) + if err != nil { + return ctx.Trigger(rm.ClientEventAllocateLaneErrored, err) + } + return ctx.Trigger(rm.ClientEventLaneAllocated, lane) +} + +// Ongoing just double checks that we may need to move out of the ongoing state cause a payment was previously requested +func Ongoing(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + if deal.PaymentRequested.GreaterThan(big.Zero()) { + if deal.LastPaymentRequested { + return ctx.Trigger(rm.ClientEventLastPaymentRequested, big.Zero()) + } + return ctx.Trigger(rm.ClientEventPaymentRequested, big.Zero()) + } + return nil +} + +// ProcessPaymentRequested processes a request for payment from the provider +func ProcessPaymentRequested(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + // If the unseal payment hasn't been made, we need to send funds + if deal.UnsealPrice.GreaterThan(deal.UnsealFundsPaid) { + log.Debugf("client: payment needed: unseal price %d > unseal paid %d", + deal.UnsealPrice, deal.UnsealFundsPaid) + return ctx.Trigger(rm.ClientEventSendFunds) + } + + // If all bytes received have been paid for, we don't need to send funds + if 
deal.BytesPaidFor >= deal.TotalReceived { + log.Debugf("client: no payment needed: bytes paid for %d >= bytes received %d", + deal.BytesPaidFor, deal.TotalReceived) + return nil + } + + // Not all bytes received have been paid for + + // If all blocks have been received we need to send a final payment + if deal.AllBlocksReceived { + log.Debugf("client: payment needed: all blocks received, bytes paid for %d < bytes received %d", + deal.BytesPaidFor, deal.TotalReceived) + return ctx.Trigger(rm.ClientEventSendFunds) + } + + // Payments are made in intervals, as bytes are received from the provider. + // If the number of bytes received is at or above the size of the current + // interval, we need to send a payment. + if deal.TotalReceived >= deal.CurrentInterval { + log.Debugf("client: payment needed: bytes received %d >= interval %d, bytes paid for %d < bytes received %d", + deal.TotalReceived, deal.CurrentInterval, deal.BytesPaidFor, deal.TotalReceived) + return ctx.Trigger(rm.ClientEventSendFunds) + } + + log.Debugf("client: no payment needed: received %d < interval %d (paid for %d)", + deal.TotalReceived, deal.CurrentInterval, deal.BytesPaidFor) + return nil +} + +// SendFunds sends the next amount requested by the provider +func SendFunds(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + totalBytesToPayFor := deal.TotalReceived + + // If unsealing has been paid for, and not all blocks have been received, + // and the number of bytes received is less than the number required + // for the current payment interval, no need to send a payment + if deal.UnsealFundsPaid.GreaterThanEqual(deal.UnsealPrice) && + !deal.AllBlocksReceived && + totalBytesToPayFor < deal.CurrentInterval { + + log.Debugf("client: ignoring payment request for %d: total bytes to pay for %d < interval %d", + deal.PaymentRequested, totalBytesToPayFor, deal.CurrentInterval) + return ctx.Trigger(rm.ClientEventPaymentNotSent) + } + + tok, _, err := 
environment.Node().GetChainHead(ctx.Context()) + if err != nil { + return ctx.Trigger(rm.ClientEventCreateVoucherFailed, err) + } + + // Calculate the payment amount due for data received + transferPrice := big.Mul(abi.NewTokenAmount(int64(totalBytesToPayFor)), deal.PricePerByte) + // Calculate the total amount including the unsealing cost + totalPrice := big.Add(transferPrice, deal.UnsealPrice) + + // If we've already sent at or above the amount due, no need to send funds + if totalPrice.LessThanEqual(deal.FundsSpent) { + log.Debugf("client: not sending voucher: funds spent %d >= total price %d: transfer price %d + unseal price %d (payment requested %d)", + deal.FundsSpent, totalPrice, transferPrice, deal.UnsealPrice, deal.PaymentRequested) + return ctx.Trigger(rm.ClientEventPaymentNotSent) + } + + log.Debugf("client: sending voucher for %d = transfer price %d + unseal price %d (payment requested %d)", + totalPrice, transferPrice, deal.UnsealPrice, deal.PaymentRequested) + + // Create a payment voucher + voucher, err := environment.Node().CreatePaymentVoucher(ctx.Context(), deal.PaymentInfo.PayCh, totalPrice, deal.PaymentInfo.Lane, tok) + if err != nil { + shortfallErr, ok := err.(rm.ShortfallError) + if ok { + // There were not enough funds in the payment channel to create a + // voucher of this amount, so the client needs to add more funds to + // the payment channel + log.Debugf("client: voucher shortfall of %d when creating voucher for %d", + shortfallErr.Shortfall(), totalPrice) + return ctx.Trigger(rm.ClientEventVoucherShortfall, shortfallErr.Shortfall()) + } + return ctx.Trigger(rm.ClientEventCreateVoucherFailed, err) + } + + // Send the payment voucher + err = environment.SendDataTransferVoucher(ctx.Context(), *deal.ChannelID, &rm.DealPayment{ + ID: deal.DealProposal.ID, + PaymentChannel: deal.PaymentInfo.PayCh, + PaymentVoucher: voucher, + }, deal.LegacyProtocol) + if err != nil { + return ctx.Trigger(rm.ClientEventWriteDealPaymentErrored, err) + } + + 
return ctx.Trigger(rm.ClientEventPaymentSent, totalPrice) +} + +// CheckFunds examines current available funds in a payment channel after a voucher shortfall to determine +// a course of action -- whether it's a good time to try again, wait for pending operations, or +// we've truly expended all funds and we need to wait for a manual readd +func CheckFunds(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + // if we already have an outstanding operation, let's wait for that to complete + if deal.WaitMsgCID != nil { + return ctx.Trigger(rm.ClientEventPaymentChannelAddingFunds, *deal.WaitMsgCID, deal.PaymentInfo.PayCh) + } + availableFunds, err := environment.Node().CheckAvailableFunds(ctx.Context(), deal.PaymentInfo.PayCh) + if err != nil { + return ctx.Trigger(rm.ClientEventPaymentChannelErrored, err) + } + unredeemedFunds := big.Sub(availableFunds.ConfirmedAmt, availableFunds.VoucherReedeemedAmt) + shortfall := big.Sub(deal.PaymentRequested, unredeemedFunds) + if shortfall.LessThanEqual(big.Zero()) { + return ctx.Trigger(rm.ClientEventPaymentChannelReady, deal.PaymentInfo.PayCh) + } + totalInFlight := big.Add(availableFunds.PendingAmt, availableFunds.QueuedAmt) + if totalInFlight.LessThan(shortfall) || availableFunds.PendingWaitSentinel == nil { + finalShortfall := big.Sub(shortfall, totalInFlight) + return ctx.Trigger(rm.ClientEventFundsExpended, finalShortfall) + } + return ctx.Trigger(rm.ClientEventPaymentChannelAddingFunds, *availableFunds.PendingWaitSentinel, deal.PaymentInfo.PayCh) +} + +// CancelDeal clears a deal that went wrong for an unknown reason +func CancelDeal(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + // Attempt to finalize the blockstore. 
If it fails just log an error as + // we want to make sure we end up in the cancelled state (not an error + // state) + if err := environment.FinalizeBlockstore(ctx.Context(), deal.ID); err != nil { + log.Errorf("failed to finalize blockstore for deal %s: %s", deal.ID, err) + } + + // If the data transfer has started, cancel it + if deal.ChannelID != nil { + // Read next response (or fail) + err := environment.CloseDataTransfer(ctx.Context(), *deal.ChannelID) + if err != nil { + return ctx.Trigger(rm.ClientEventDataTransferError, err) + } + } + + return ctx.Trigger(rm.ClientEventCancelComplete) +} + +// CheckComplete verifies that a provider that completed without a last payment requested did in fact send us all the data +func CheckComplete(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + // This function is called when the provider tells the client that it has + // sent all the blocks, so check if all blocks have been received. + if deal.AllBlocksReceived { + return ctx.Trigger(rm.ClientEventCompleteVerified) + } + + // If the deal price per byte is zero, wait for the last blocks to + // arrive + if deal.PricePerByte.IsZero() { + return ctx.Trigger(rm.ClientEventWaitForLastBlocks) + } + + // If the deal price per byte is non-zero, the provider should only + // have sent the complete message after receiving the last payment + // from the client, which should happen after all blocks have been + // received. So if they haven't been received the provider is trying + // to terminate the deal early. 
+ return ctx.Trigger(rm.ClientEventEarlyTermination) +} + +// FinalizeBlockstore is called once all blocks have been received and the +// blockstore needs to be finalized before completing the deal +func FinalizeBlockstore(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + if err := environment.FinalizeBlockstore(ctx.Context(), deal.ID); err != nil { + return ctx.Trigger(rm.ClientEventFinalizeBlockstoreErrored, err) + } + return ctx.Trigger(rm.ClientEventBlockstoreFinalized) +} + +// FailsafeFinalizeBlockstore is called when there is a termination state +// because of some irregularity (eg deal not found). +// It attempts to clean up the blockstore, but even if there's an error it +// always fires a blockstore finalized event so that we still end up in the +// appropriate termination state. +func FailsafeFinalizeBlockstore(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { + // Attempt to finalize the blockstore. If it fails just log an error as + // we want to make sure we end up in a specific termination state (not + // necessarily the error state) + if err := environment.FinalizeBlockstore(ctx.Context(), deal.ID); err != nil { + log.Errorf("failed to finalize blockstore for deal %s: %s", deal.ID, err) + } + return ctx.Trigger(rm.ClientEventBlockstoreFinalized) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_states_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_states_test.go new file mode 100644 index 00000000000..099b3d98c66 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/client_states_test.go @@ -0,0 +1,914 @@ +package clientstates_test + +import ( + "context" + "errors" + "math/rand" + "testing" + + "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + 
"github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/filecoin-project/go-statemachine/fsm" + fsmtest "github.com/filecoin-project/go-statemachine/fsm/testutil" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" + testnet "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +type consumeBlockResponse struct { + size uint64 + done bool + err error +} + +type fakeEnvironment struct { + node retrievalmarket.RetrievalClientNode + OpenDataTransferError error + SendDataTransferVoucherError error + CloseDataTransferError error + FinalizeBlockstoreError error +} + +func (e *fakeEnvironment) Node() retrievalmarket.RetrievalClientNode { + return e.node +} + +func (e *fakeEnvironment) OpenDataTransfer(ctx context.Context, to peer.ID, proposal *rm.DealProposal, legacy bool) (datatransfer.ChannelID, error) { + return datatransfer.ChannelID{ID: datatransfer.TransferID(rand.Uint64()), Responder: to, Initiator: testnet.GeneratePeers(1)[0]}, e.OpenDataTransferError +} + +func (e *fakeEnvironment) SendDataTransferVoucher(_ context.Context, _ datatransfer.ChannelID, _ *rm.DealPayment, _ bool) error { + return e.SendDataTransferVoucherError +} + +func (e *fakeEnvironment) CloseDataTransfer(_ context.Context, _ datatransfer.ChannelID) error { + return e.CloseDataTransferError +} + +func (e *fakeEnvironment) FinalizeBlockstore(ctx context.Context, id rm.DealID) error { + return e.FinalizeBlockstoreError +} + +func TestProposeDeal(t *testing.T) { + ctx := context.Background() + node := 
testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runProposeDeal := func(t *testing.T, openError error, dealState *retrievalmarket.ClientDealState) { + environment := &fakeEnvironment{node: node, OpenDataTransferError: openError} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := clientstates.ProposeDeal(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("it works", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusNew) + var openError error = nil + runProposeDeal(t, openError, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusWaitForAcceptance) + require.Equal(t, dealState.ChannelID.Responder, dealState.Sender) + }) + + t.Run("it works, legacy", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusRetryLegacy) + var openError error = nil + runProposeDeal(t, openError, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusWaitForAcceptanceLegacy) + require.Equal(t, dealState.ChannelID.Responder, dealState.Sender) + }) + + t.Run("data transfer eror", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusNew) + openError := errors.New("something went wrong") + runProposeDeal(t, openError, dealState) + require.NotEmpty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusErroring) + }) +} + +func TestSetupPaymentChannel(t *testing.T) { + ctx := context.Background() + expectedPayCh := address.TestAddress2 + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runSetupPaymentChannel := func(t *testing.T, + params 
testnodes.TestRetrievalClientNodeParams, + dealState *retrievalmarket.ClientDealState) { + node := testnodes.NewTestRetrievalClientNode(params) + environment := &fakeEnvironment{node: node} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := clientstates.SetupPaymentChannelStart(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("payment channel create initiated", func(t *testing.T) { + envParams := testnodes.TestRetrievalClientNodeParams{ + PayCh: address.Undef, + CreatePaychCID: testnet.GenerateCids(1)[0], + } + dealState := makeDealState(retrievalmarket.DealStatusAccepted) + runSetupPaymentChannel(t, envParams, dealState) + assert.Empty(t, dealState.Message) + require.Equal(t, envParams.CreatePaychCID, *dealState.WaitMsgCID) + assert.Equal(t, dealState.Status, retrievalmarket.DealStatusPaymentChannelCreating) + }) + + t.Run("payment channel needs funds added", func(t *testing.T) { + envParams := testnodes.TestRetrievalClientNodeParams{ + AddFundsOnly: true, + PayCh: expectedPayCh, + AddFundsCID: testnet.GenerateCids(1)[0], + } + dealState := makeDealState(retrievalmarket.DealStatusAccepted) + runSetupPaymentChannel(t, envParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, envParams.AddFundsCID, *dealState.WaitMsgCID) + require.Equal(t, retrievalmarket.DealStatusPaymentChannelAddingInitialFunds, dealState.Status) + require.Equal(t, expectedPayCh, dealState.PaymentInfo.PayCh) + }) + + t.Run("payment channel fully ready", func(t *testing.T) { + envParams := testnodes.TestRetrievalClientNodeParams{ + AddFundsOnly: true, + PayCh: expectedPayCh, + AddFundsCID: cid.Undef, + } + dealState := makeDealState(retrievalmarket.DealStatusAccepted) + runSetupPaymentChannel(t, envParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusPaymentChannelAllocatingLane) + require.Equal(t, expectedPayCh, 
dealState.PaymentInfo.PayCh) + }) + + t.Run("when create payment channel fails", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusAccepted) + envParams := testnodes.TestRetrievalClientNodeParams{ + PayCh: address.Undef, + PayChErr: errors.New("Something went wrong"), + } + runSetupPaymentChannel(t, envParams, dealState) + require.NotEmpty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailing) + }) + + t.Run("payment channel skip if total funds is zero", func(t *testing.T) { + envParams := testnodes.TestRetrievalClientNodeParams{} + dealState := makeDealState(retrievalmarket.DealStatusAccepted) + dealState.TotalFunds = abi.NewTokenAmount(0) + runSetupPaymentChannel(t, envParams, dealState) + assert.Empty(t, dealState.Message) + assert.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) +} + +func TestWaitForPaymentReady(t *testing.T) { + ctx := context.Background() + expectedPayCh := address.TestAddress2 + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runWaitForPaychReady := func(t *testing.T, + params testnodes.TestRetrievalClientNodeParams, + dealState *retrievalmarket.ClientDealState) { + node := testnodes.NewTestRetrievalClientNode(params) + environment := &fakeEnvironment{node: node} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := clientstates.WaitPaymentChannelReady(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + msgCID := testnet.GenerateCids(1)[0] + + t.Run("it works, creating state", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelCreating) + dealState.WaitMsgCID = &msgCID + params := testnodes.TestRetrievalClientNodeParams{ + PayCh: expectedPayCh, + CreatePaychCID: msgCID, + } + runWaitForPaychReady(t, params, dealState) + require.Empty(t, dealState.Message) + 
require.Equal(t, dealState.Status, retrievalmarket.DealStatusPaymentChannelAllocatingLane) + require.Equal(t, expectedPayCh, dealState.PaymentInfo.PayCh) + }) + t.Run("if Wait fails", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelCreating) + dealState.WaitMsgCID = &msgCID + params := testnodes.TestRetrievalClientNodeParams{ + PayCh: expectedPayCh, + CreatePaychCID: msgCID, + WaitForReadyErr: errors.New("boom"), + } + runWaitForPaychReady(t, params, dealState) + require.Contains(t, dealState.Message, "boom") + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailing) + }) + t.Run("it works, waiting for added funds", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelAddingFunds) + dealState.WaitMsgCID = &msgCID + params := testnodes.TestRetrievalClientNodeParams{ + PayCh: expectedPayCh, + AddFundsCID: msgCID, + AddFundsOnly: true, + } + runWaitForPaychReady(t, params, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) +} + +func TestAllocateLane(t *testing.T) { + ctx := context.Background() + expectedLane := uint64(10) + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runAllocateLane := func(t *testing.T, + params testnodes.TestRetrievalClientNodeParams, + dealState *retrievalmarket.ClientDealState) { + node := testnodes.NewTestRetrievalClientNode(params) + environment := &fakeEnvironment{node: node} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := clientstates.AllocateLane(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("it succeeds", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelAllocatingLane) + params := testnodes.TestRetrievalClientNodeParams{ + Lane: expectedLane, + } + 
runAllocateLane(t, params, dealState) + require.Equal(t, retrievalmarket.DealStatusOngoing, dealState.Status) + require.Equal(t, expectedLane, dealState.PaymentInfo.Lane) + }) + + t.Run("if AllocateLane fails", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelAllocatingLane) + params := testnodes.TestRetrievalClientNodeParams{ + LaneError: errors.New("boom"), + } + runAllocateLane(t, params, dealState) + require.Contains(t, dealState.Message, "boom") + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailing) + }) +} + +func TestOngoing(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runOngoing := func(t *testing.T, + dealState *retrievalmarket.ClientDealState) { + node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) + environment := &fakeEnvironment{node: node} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := clientstates.Ongoing(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("it works - no change", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusOngoing) + dealState.PaymentRequested = big.Zero() + runOngoing(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) + + t.Run("it works - payment requested", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusOngoing) + runOngoing(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeeded) + }) + + t.Run("it works - last payment requested", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusOngoing) + dealState.LastPaymentRequested = true + runOngoing(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeededLastPayment) + }) +} + +func 
TestProcessPaymentRequested(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runProcessPaymentRequested := func(t *testing.T, + dealState *retrievalmarket.ClientDealState) { + node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) + environment := &fakeEnvironment{node: node} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := clientstates.ProcessPaymentRequested(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("send funds last payment", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusFundsNeededLastPayment) + dealState.TotalReceived = defaultBytesPaidFor + 500 + dealState.AllBlocksReceived = true + runProcessPaymentRequested(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusSendFundsLastPayment) + }) + + t.Run("send funds if unseal funds needed", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) + dealState.UnsealPrice = abi.NewTokenAmount(1000) + dealState.UnsealFundsPaid = abi.NewTokenAmount(0) + runProcessPaymentRequested(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusSendFunds) + }) + + t.Run("dont send funds if paid for all bytes received", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) + dealState.BytesPaidFor = 1000 + dealState.TotalReceived = 1000 + dealState.CurrentInterval = 1000 + runProcessPaymentRequested(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeeded) + }) + + t.Run("send funds if not all bytes paid for and all blocks received", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) + dealState.BytesPaidFor = 900 + dealState.TotalReceived = 1000 + 
dealState.AllBlocksReceived = true + dealState.CurrentInterval = 1000 + runProcessPaymentRequested(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusSendFunds) + }) + + t.Run("send funds if total received > current deal interval", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) + dealState.BytesPaidFor = 900 + dealState.TotalReceived = 1000 + dealState.CurrentInterval = 900 + runProcessPaymentRequested(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusSendFunds) + }) + + t.Run("dont send funds if total received < current deal interval", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) + dealState.BytesPaidFor = 900 + dealState.TotalReceived = 999 + dealState.CurrentInterval = 1000 + runProcessPaymentRequested(t, dealState) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeeded) + }) +} + +func TestSendFunds(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runSendFunds := func(t *testing.T, + sendDataTransferVoucherError error, + nodeParams testnodes.TestRetrievalClientNodeParams, + dealState *retrievalmarket.ClientDealState) { + node := testnodes.NewTestRetrievalClientNode(nodeParams) + environment := &fakeEnvironment{node: node, SendDataTransferVoucherError: sendDataTransferVoucherError} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + dealState.ChannelID = &datatransfer.ChannelID{ + Initiator: "initiator", + Responder: dealState.Sender, + ID: 1, + } + err := clientstates.SendFunds(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + + testVoucher := &paych.SignedVoucher{} + + t.Run("send funds", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + var sendVoucherError error = nil + 
nodeParams := testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(200) + dealState.UnsealFundsPaid = abi.NewTokenAmount(200) + dealState.BytesPaidFor = 800 + dealState.FundsSpent = abi.NewTokenAmount(1000) + dealState.PaymentRequested = abi.NewTokenAmount(500) + dealState.CurrentInterval = 1000 + dealState.PaymentInterval = 1000 + dealState.PaymentIntervalIncrease = 100 + dealState.TotalReceived = 1000 + + // Should send voucher for 1200 = transfer price (1000 * 1) + unseal price 200 + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(500-(1000-800))) + require.Equal(t, dealState.FundsSpent, abi.NewTokenAmount(1000+200)) + require.EqualValues(t, dealState.BytesPaidFor, 1000) + require.EqualValues(t, dealState.CurrentInterval, 1000+(1000+100)) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) + + t.Run("send funds last payment", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFundsLastPayment) + var sendVoucherError error = nil + nodeParams := testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(200) + dealState.UnsealFundsPaid = abi.NewTokenAmount(200) + dealState.BytesPaidFor = 800 + dealState.FundsSpent = abi.NewTokenAmount(1000) + dealState.PaymentRequested = abi.NewTokenAmount(500) + dealState.CurrentInterval = 1000 + dealState.PaymentInterval = 1000 + dealState.PaymentIntervalIncrease = 100 + dealState.TotalReceived = 1000 + + // Should send voucher for 1200 = transfer price (1000 * 1) + unseal price 200 + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.PaymentRequested, 
abi.NewTokenAmount(500-(1000-800))) + require.Equal(t, dealState.FundsSpent, abi.NewTokenAmount(1000+200)) + require.EqualValues(t, dealState.BytesPaidFor, 1000) + require.EqualValues(t, dealState.CurrentInterval, 1000+(1000+100)) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFinalizing) + }) + + t.Run("dont send funds if total received less then interval", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + var sendVoucherError error = nil + nodeParams := testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(200) + dealState.UnsealFundsPaid = abi.NewTokenAmount(200) + dealState.BytesPaidFor = 800 + dealState.FundsSpent = abi.NewTokenAmount(1000) + dealState.PaymentRequested = abi.NewTokenAmount(500) + dealState.CurrentInterval = 2000 + dealState.TotalReceived = 1000 + + // Should not send voucher + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(500)) + require.Equal(t, dealState.FundsSpent, abi.NewTokenAmount(1000)) + require.EqualValues(t, dealState.BytesPaidFor, 800) + require.EqualValues(t, dealState.CurrentInterval, 2000) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) + + t.Run("dont send funds if total price <= funds spent", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + var sendVoucherError error = nil + nodeParams := testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(200) + dealState.UnsealFundsPaid = abi.NewTokenAmount(200) + dealState.BytesPaidFor = 800 + dealState.FundsSpent = abi.NewTokenAmount(1200) + dealState.PaymentRequested = abi.NewTokenAmount(500) + dealState.CurrentInterval = 1000 + 
dealState.PaymentInterval = 1000 + dealState.PaymentIntervalIncrease = 100 + dealState.TotalReceived = 1000 + + // Total price 1200 = transfer price (1000 * 1) + unseal price 200 + // Funds spent = 1200 + // So don't send voucher + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(500)) + require.Equal(t, dealState.FundsSpent, abi.NewTokenAmount(1200)) + require.EqualValues(t, dealState.BytesPaidFor, 800) + require.EqualValues(t, dealState.CurrentInterval, 1000) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) + + t.Run("dont send funds if interval not met", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + var sendVoucherError error = nil + nodeParams := testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(0) + dealState.UnsealFundsPaid = abi.NewTokenAmount(0) + dealState.FundsSpent = abi.NewTokenAmount(1000) + dealState.PaymentRequested = abi.NewTokenAmount(200) + dealState.BytesPaidFor = 1000 + dealState.TotalReceived = 1200 + dealState.CurrentInterval = 2000 + + // Should not send voucher: bytes received 1200 < interval 2000 + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(200)) + require.Equal(t, dealState.FundsSpent, abi.NewTokenAmount(1000)) + require.EqualValues(t, dealState.BytesPaidFor, 1000) + require.EqualValues(t, dealState.CurrentInterval, 2000) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) + + t.Run("send funds if all blocks received, even if interval not met", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + var sendVoucherError error = nil + nodeParams := 
testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(0) + dealState.UnsealFundsPaid = abi.NewTokenAmount(0) + dealState.FundsSpent = abi.NewTokenAmount(1000) + dealState.PaymentRequested = abi.NewTokenAmount(200) + dealState.BytesPaidFor = 1000 + dealState.TotalReceived = 1200 + dealState.CurrentInterval = 2000 + dealState.AllBlocksReceived = true + + // Should send voucher for 1200 = transfer price (1200 * 1) + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.True(t, dealState.PaymentRequested.IsZero()) + require.Equal(t, dealState.FundsSpent, abi.NewTokenAmount(1200)) + require.EqualValues(t, dealState.BytesPaidFor, 1200) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) + }) + + t.Run("only unsealing payment is accounted for when price per bytes is zero", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFundsLastPayment) + + dealState.PricePerByte = abi.NewTokenAmount(0) + dealState.UnsealPrice = abi.NewTokenAmount(200) + dealState.UnsealFundsPaid = abi.NewTokenAmount(0) + dealState.BytesPaidFor = 0 + dealState.FundsSpent = abi.NewTokenAmount(0) + dealState.PaymentRequested = abi.NewTokenAmount(200) + dealState.CurrentInterval = 1000 + dealState.TotalReceived = 1000 + + var sendVoucherError error = nil + nodeParams := testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.True(t, dealState.PaymentRequested.IsZero()) + require.Equal(t, dealState.FundsSpent, abi.NewTokenAmount(200)) + require.EqualValues(t, dealState.BytesPaidFor, 0) + require.EqualValues(t, dealState.CurrentInterval, 1000) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFinalizing) + }) + + t.Run("voucher create fails", func(t *testing.T) { + 
dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + var sendVoucherError error = nil + nodeParams := testnodes.TestRetrievalClientNodeParams{ + VoucherError: errors.New("Something Went Wrong"), + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(0) + dealState.UnsealFundsPaid = abi.NewTokenAmount(0) + dealState.BytesPaidFor = 0 + dealState.FundsSpent = abi.NewTokenAmount(0) + dealState.PaymentRequested = abi.NewTokenAmount(1000) + dealState.CurrentInterval = 1000 + dealState.TotalReceived = 1000 + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.NotEmpty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailing) + }) + + t.Run("voucher create with shortfall", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + var sendVoucherError error = nil + shortFall := abi.NewTokenAmount(10000) + nodeParams := testnodes.TestRetrievalClientNodeParams{ + VoucherError: retrievalmarket.NewShortfallError(shortFall), + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(0) + dealState.UnsealFundsPaid = abi.NewTokenAmount(0) + dealState.BytesPaidFor = 0 + dealState.FundsSpent = abi.NewTokenAmount(0) + dealState.PaymentRequested = abi.NewTokenAmount(1000) + dealState.CurrentInterval = 1000 + dealState.TotalReceived = 1000 + runSendFunds(t, sendVoucherError, nodeParams, dealState) + require.Empty(t, dealState.Message) + require.Equal(t, dealState.Status, retrievalmarket.DealStatusCheckFunds) + }) + + t.Run("unable to send payment", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusSendFunds) + sendVoucherError := errors.New("something went wrong") + nodeParams := testnodes.TestRetrievalClientNodeParams{ + Voucher: testVoucher, + } + dealState.PricePerByte = abi.NewTokenAmount(1) + dealState.UnsealPrice = abi.NewTokenAmount(0) + dealState.UnsealFundsPaid = 
abi.NewTokenAmount(0)
		dealState.BytesPaidFor = 0
		dealState.FundsSpent = abi.NewTokenAmount(0)
		dealState.PaymentRequested = abi.NewTokenAmount(1000)
		dealState.CurrentInterval = 1000
		dealState.TotalReceived = 1000
		runSendFunds(t, sendVoucherError, nodeParams, dealState)
		require.NotEmpty(t, dealState.Message)
		require.Equal(t, dealState.Status, retrievalmarket.DealStatusErroring)
	})
}

func TestCheckFunds(t *testing.T) {
	ctx := context.Background()
	eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents)
	require.NoError(t, err)
	// runCheckFunds drives the CheckFunds handler against a stubbed node and
	// replays the resulting FSM events onto dealState.
	runCheckFunds := func(t *testing.T,
		params testnodes.TestRetrievalClientNodeParams,
		dealState *retrievalmarket.ClientDealState) {
		node := testnodes.NewTestRetrievalClientNode(params)
		environment := &fakeEnvironment{node: node}
		fsmCtx := fsmtest.NewTestContext(ctx, eventMachine)
		err := clientstates.CheckFunds(fsmCtx, environment, *dealState)
		require.NoError(t, err)
		fsmCtx.ReplayEvents(t, dealState)
	}
	msgCid := testnet.GenerateCids(1)[0]

	t.Run("already waiting on add funds", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCheckFunds)
		dealState.WaitMsgCID = &msgCid
		nodeParams := testnodes.TestRetrievalClientNodeParams{}
		runCheckFunds(t, nodeParams, dealState)
		require.Equal(t, dealState.Status, retrievalmarket.DealStatusPaymentChannelAddingFunds)
	})

	t.Run("confirmed funds already covers payment", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCheckFunds)
		dealState.PaymentRequested = abi.NewTokenAmount(10000)
		nodeParams := testnodes.TestRetrievalClientNodeParams{
			ChannelAvailableFunds: rm.ChannelAvailableFunds{
				ConfirmedAmt: abi.NewTokenAmount(10000),
			},
		}
		runCheckFunds(t, nodeParams, dealState)
		require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing)
	})

	t.Run("pending funds covers shortfal", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCheckFunds)
		dealState.PaymentRequested = abi.NewTokenAmount(10000)
		nodeParams := testnodes.TestRetrievalClientNodeParams{
			ChannelAvailableFunds: rm.ChannelAvailableFunds{
				PendingAmt:          abi.NewTokenAmount(8000),
				PendingWaitSentinel: &msgCid,
				QueuedAmt:           abi.NewTokenAmount(4000),
			},
		}
		runCheckFunds(t, nodeParams, dealState)
		require.Equal(t, dealState.Status, retrievalmarket.DealStatusPaymentChannelAddingFunds)
		require.True(t, dealState.WaitMsgCID.Equals(msgCid))
	})

	t.Run("pending funds don't cover shortfal", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCheckFunds)
		dealState.PaymentRequested = abi.NewTokenAmount(10000)
		nodeParams := testnodes.TestRetrievalClientNodeParams{
			ChannelAvailableFunds: rm.ChannelAvailableFunds{
				PendingAmt:          abi.NewTokenAmount(8000),
				PendingWaitSentinel: &msgCid,
			},
		}
		runCheckFunds(t, nodeParams, dealState)
		require.Equal(t, dealState.Status, retrievalmarket.DealStatusInsufficientFunds)
	})
}

func TestCancelDeal(t *testing.T) {
	ctx := context.Background()
	eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents)
	require.NoError(t, err)
	// runCancelDeal runs the CancelDeal handler with injectable close /
	// finalize-blockstore errors and replays resulting events onto dealState.
	runCancelDeal := func(t *testing.T,
		closeError error,
		finalizeBlockstoreError error,
		dealState *retrievalmarket.ClientDealState) {
		node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{})
		environment := &fakeEnvironment{
			node:                    node,
			CloseDataTransferError:  closeError,
			FinalizeBlockstoreError: finalizeBlockstoreError,
		}
		fsmCtx := fsmtest.NewTestContext(ctx, eventMachine)
		dealState.ChannelID = &datatransfer.ChannelID{
			Initiator: "initiator",
			Responder: dealState.Sender,
			ID:        1,
		}
		err := clientstates.CancelDeal(fsmCtx, environment, *dealState)
		require.NoError(t, err)
		fsmCtx.ReplayEvents(t, dealState)
	}

	t.Run("it works", func(t *testing.T) {
		dealState :=
makeDealState(retrievalmarket.DealStatusFailing)
		dealState.Message = "Previous error"
		runCancelDeal(t, nil, nil, dealState)
		require.Equal(t, "Previous error", dealState.Message)
		require.Equal(t, retrievalmarket.DealStatusErrored, dealState.Status)
	})

	t.Run("error closing stream", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusFailing)
		dealState.Message = "Previous error"
		runCancelDeal(t, errors.New("something went wrong"), nil, dealState)
		require.NotEqual(t, "Previous error", dealState.Message)
		require.NotEmpty(t, dealState.Message)
		require.Equal(t, retrievalmarket.DealStatusErroring, dealState.Status)
	})

	// Note: we ignore a finalize blockstore error while cancelling
	t.Run("error finalizing blockstore", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCancelling)
		dealState.Message = "Previous error"
		runCancelDeal(t, nil, errors.New("finalize blockstore err"), dealState)
		require.Equal(t, "Previous error", dealState.Message)
		require.Equal(t, retrievalmarket.DealStatusCancelled, dealState.Status)
	})

	t.Run("it works, cancelling", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCancelling)
		dealState.Message = "Previous error"
		runCancelDeal(t, nil, nil, dealState)
		require.Equal(t, "Previous error", dealState.Message)
		require.Equal(t, retrievalmarket.DealStatusCancelled, dealState.Status)
	})
}

func TestCheckComplete(t *testing.T) {
	ctx := context.Background()
	eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents)
	require.NoError(t, err)
	// runCheckComplete drives the CheckComplete handler and replays the
	// resulting FSM events onto dealState.
	runCheckComplete := func(t *testing.T, dealState *retrievalmarket.ClientDealState) {
		node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{})
		environment := &fakeEnvironment{node: node}
		fsmCtx := fsmtest.NewTestContext(ctx, eventMachine)
		err := clientstates.CheckComplete(fsmCtx, environment, *dealState)
		require.NoError(t, err)
		fsmCtx.ReplayEvents(t, dealState)
	}

	t.Run("when all blocks received", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCheckComplete)
		dealState.AllBlocksReceived = true
		runCheckComplete(t, dealState)
		require.Equal(t, retrievalmarket.DealStatusFinalizingBlockstore, dealState.Status)
	})

	t.Run("when not all blocks are received", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCheckComplete)
		dealState.AllBlocksReceived = false
		runCheckComplete(t, dealState)
		require.Equal(t, retrievalmarket.DealStatusErroring, dealState.Status)
		require.Equal(t, "Provider sent complete status without sending all data", dealState.Message)
	})

	t.Run("when not all blocks are received and deal price per byte is zero", func(t *testing.T) {
		dealState := makeDealState(retrievalmarket.DealStatusCheckComplete)
		dealState.PricePerByte = abi.NewTokenAmount(0)
		dealState.AllBlocksReceived = false
		runCheckComplete(t, dealState)
		require.Equal(t, retrievalmarket.DealStatusClientWaitingForLastBlocks, dealState.Status)
	})
}

func TestFinalizeBlockstore(t *testing.T) {
	ctx := context.Background()
	eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents)
	require.NoError(t, err)
	// runFinalizeBlockstore drives the FinalizeBlockstore handler with an
	// injectable finalize error and replays resulting events onto dealState.
	runFinalizeBlockstore := func(t *testing.T,
		finalizeBlockstoreError error,
		dealState *retrievalmarket.ClientDealState,
	) {
		params := testnodes.TestRetrievalClientNodeParams{}
		node := testnodes.NewTestRetrievalClientNode(params)
		environment := &fakeEnvironment{node: node, FinalizeBlockstoreError: finalizeBlockstoreError}
		fsmCtx := fsmtest.NewTestContext(ctx, eventMachine)
		err := clientstates.FinalizeBlockstore(fsmCtx, environment, *dealState)
		require.NoError(t, err)
		fsmCtx.ReplayEvents(t, dealState)
	}

	t.Run("it succeeds", func(t *testing.T) {
		dealState :=
makeDealState(retrievalmarket.DealStatusFinalizingBlockstore) + runFinalizeBlockstore(t, nil, dealState) + require.Equal(t, retrievalmarket.DealStatusCompleted, dealState.Status) + }) + + t.Run("if FinalizeBlockstore fails", func(t *testing.T) { + dealState := makeDealState(retrievalmarket.DealStatusFinalizingBlockstore) + err := errors.New("boom") + runFinalizeBlockstore(t, err, dealState) + require.Contains(t, dealState.Message, "boom") + require.Equal(t, dealState.Status, retrievalmarket.DealStatusErrored) + }) +} + +func TestFailsafeFinalizeBlockstore(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) + require.NoError(t, err) + runFailsafeFinalizeBlockstore := func(t *testing.T, + finalizeBlockstoreError error, + dealState *retrievalmarket.ClientDealState, + ) { + params := testnodes.TestRetrievalClientNodeParams{} + node := testnodes.NewTestRetrievalClientNode(params) + environment := &fakeEnvironment{node: node, FinalizeBlockstoreError: finalizeBlockstoreError} + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := clientstates.FailsafeFinalizeBlockstore(fsmCtx, environment, *dealState) + require.NoError(t, err) + fsmCtx.ReplayEvents(t, dealState) + } + + statuses := [][2]retrievalmarket.DealStatus{{ + rm.DealStatusErroring, rm.DealStatusErrored, + }, { + rm.DealStatusRejecting, rm.DealStatusRejected, + }, { + rm.DealStatusDealNotFoundCleanup, rm.DealStatusDealNotFound, + }} + for _, states := range statuses { + startState := states[0] + endState := states[1] + t.Run("in state "+startState.String(), func(t *testing.T) { + t.Run("it succeeds", func(t *testing.T) { + dealState := makeDealState(startState) + runFailsafeFinalizeBlockstore(t, nil, dealState) + require.Equal(t, endState, dealState.Status) + }) + + // Note that even if FinalizeBlockstore fails we still expect to + // move to the correct end state + t.Run("if FinalizeBlockstore 
fails", func(t *testing.T) { + dealState := makeDealState(startState) + err := errors.New("boom") + runFailsafeFinalizeBlockstore(t, err, dealState) + require.Equal(t, endState, dealState.Status) + }) + }) + } +} + +var defaultTotalFunds = abi.NewTokenAmount(4000000) +var defaultCurrentInterval = uint64(1000) +var defaultIntervalIncrease = uint64(500) +var defaultPricePerByte = abi.NewTokenAmount(500) +var defaultTotalReceived = uint64(6000) +var defaultBytesPaidFor = uint64(5000) +var defaultFundsSpent = abi.NewTokenAmount(2500000) +var defaultPaymentRequested = abi.NewTokenAmount(500000) +var defaultUnsealFundsPaid = abi.NewTokenAmount(0) + +func makeDealState(status retrievalmarket.DealStatus) *retrievalmarket.ClientDealState { + paymentInfo := &retrievalmarket.PaymentInfo{} + + switch status { + case retrievalmarket.DealStatusNew, retrievalmarket.DealStatusAccepted, retrievalmarket.DealStatusPaymentChannelCreating: + paymentInfo = nil + } + + return &retrievalmarket.ClientDealState{ + TotalFunds: defaultTotalFunds, + MinerWallet: address.TestAddress, + ClientWallet: address.TestAddress2, + PaymentInfo: paymentInfo, + Status: status, + BytesPaidFor: defaultBytesPaidFor, + TotalReceived: defaultTotalReceived, + CurrentInterval: defaultCurrentInterval, + FundsSpent: defaultFundsSpent, + UnsealFundsPaid: defaultUnsealFundsPaid, + PaymentRequested: defaultPaymentRequested, + DealProposal: retrievalmarket.DealProposal{ + ID: retrievalmarket.DealID(10), + Params: retrievalmarket.NewParamsV0(defaultPricePerByte, 0, defaultIntervalIncrease), + }, + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/doc.go new file mode 100644 index 00000000000..a97c9531247 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/clientstates/doc.go @@ -0,0 +1,13 @@ +/* +Package clientstates contains state machine logic relating to the 
`RetrievalClient`. + +client_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. + +client_states.go contains state handler functions. + +The following diagram illustrates the operation of the client state machine. This diagram is auto-generated from current code and should remain up to date over time: + +https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg + +*/ +package clientstates diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/doc.go new file mode 100644 index 00000000000..94b32998c92 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/doc.go @@ -0,0 +1,6 @@ +/* +Package retrievalimpl provides the primary implementation of retrieval market top level interfaces interfaces + +This package provides a production implementation of `RetrievalClient` and `RetrievalProvider`. 
+*/ +package retrievalimpl diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/dtutils/dtutils.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/dtutils/dtutils.go new file mode 100644 index 00000000000..f1b415957df --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/dtutils/dtutils.go @@ -0,0 +1,227 @@ +// Package dtutils provides event listeners for the client and provider to +// listen for events on the data transfer module and dispatch FSM events based on them +package dtutils + +import ( + "fmt" + "math" + + "github.com/ipfs/go-graphsync/storeutil" + bstore "github.com/ipfs/go-ipfs-blockstore" + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-ipld-prime" + peer "github.com/libp2p/go-libp2p-core/peer" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-statemachine/fsm" + + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" +) + +var log = logging.Logger("retrievalmarket_impl") + +// EventReceiver is any thing that can receive FSM events +type EventReceiver interface { + Send(id interface{}, name fsm.EventName, args ...interface{}) (err error) +} + +const noProviderEvent = rm.ProviderEvent(math.MaxUint64) + +func providerEvent(event datatransfer.Event, channelState datatransfer.ChannelState) (rm.ProviderEvent, []interface{}) { + switch event.Code { + case datatransfer.Accept: + return rm.ProviderEventDealAccepted, []interface{}{channelState.ChannelID()} + case datatransfer.Disconnected: + return rm.ProviderEventDataTransferError, []interface{}{fmt.Errorf("deal data transfer stalled (peer hungup)")} + case datatransfer.Error: + return rm.ProviderEventDataTransferError, []interface{}{fmt.Errorf("deal data transfer failed: %s", event.Message)} + case datatransfer.Cancel: + return rm.ProviderEventClientCancelled, nil + default: + return noProviderEvent, nil + } +} 
+ +// ProviderDataTransferSubscriber is the function called when an event occurs in a data +// transfer received by a provider -- it reads the voucher to verify this event occurred +// in a storage market deal, then, based on the data transfer event that occurred, it generates +// and update message for the deal -- either moving to staged for a completion +// event or moving to error if a data transfer error occurs +func ProviderDataTransferSubscriber(deals EventReceiver) datatransfer.Subscriber { + return func(event datatransfer.Event, channelState datatransfer.ChannelState) { + dealProposal, ok := dealProposalFromVoucher(channelState.Voucher()) + // if this event is for a transfer not related to storage, ignore + if !ok { + return + } + + if channelState.Status() == datatransfer.Completed { + err := deals.Send(rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: channelState.Recipient()}, rm.ProviderEventComplete) + if err != nil { + log.Errorf("processing dt event: %s", err) + } + } + + retrievalEvent, params := providerEvent(event, channelState) + if retrievalEvent == noProviderEvent { + return + } + log.Debugw("processing retrieval provider dt event", "event", datatransfer.Events[event.Code], "dealID", dealProposal.ID, "peer", channelState.OtherPeer(), "retrievalEvent", rm.ProviderEvents[retrievalEvent]) + + err := deals.Send(rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: channelState.Recipient()}, retrievalEvent, params...) 
+ if err != nil { + log.Errorf("processing dt event: %s", err) + } + + } +} + +func clientEventForResponse(response *rm.DealResponse) (rm.ClientEvent, []interface{}) { + switch response.Status { + case rm.DealStatusRejected: + return rm.ClientEventDealRejected, []interface{}{response.Message} + case rm.DealStatusDealNotFound: + return rm.ClientEventDealNotFound, []interface{}{response.Message} + case rm.DealStatusAccepted: + return rm.ClientEventDealAccepted, nil + case rm.DealStatusFundsNeededUnseal: + return rm.ClientEventUnsealPaymentRequested, []interface{}{response.PaymentOwed} + case rm.DealStatusFundsNeededLastPayment: + return rm.ClientEventLastPaymentRequested, []interface{}{response.PaymentOwed} + case rm.DealStatusCompleted: + return rm.ClientEventComplete, nil + case rm.DealStatusFundsNeeded, rm.DealStatusOngoing: + return rm.ClientEventPaymentRequested, []interface{}{response.PaymentOwed} + default: + return rm.ClientEventUnknownResponseReceived, []interface{}{response.Status} + } +} + +const noEvent = rm.ClientEvent(math.MaxUint64) + +func clientEvent(event datatransfer.Event, channelState datatransfer.ChannelState) (rm.ClientEvent, []interface{}) { + switch event.Code { + case datatransfer.DataReceivedProgress: + return rm.ClientEventBlocksReceived, []interface{}{channelState.Received()} + case datatransfer.FinishTransfer: + return rm.ClientEventAllBlocksReceived, nil + case datatransfer.Cancel: + return rm.ClientEventProviderCancelled, nil + case datatransfer.NewVoucherResult: + response, ok := dealResponseFromVoucherResult(channelState.LastVoucherResult()) + if !ok { + log.Errorf("unexpected voucher result received: %s", channelState.LastVoucher().Type()) + return noEvent, nil + } + + return clientEventForResponse(response) + case datatransfer.Disconnected: + return rm.ClientEventDataTransferError, []interface{}{fmt.Errorf("deal data transfer stalled (peer hungup)")} + case datatransfer.Error: + if channelState.Message() == 
datatransfer.ErrRejected.Error() { + return rm.ClientEventDealRejected, []interface{}{"rejected for unknown reasons"} + } + return rm.ClientEventDataTransferError, []interface{}{fmt.Errorf("deal data transfer failed: %s", event.Message)} + default: + } + + return noEvent, nil +} + +// ClientDataTransferSubscriber is the function called when an event occurs in a data +// transfer initiated on the client -- it reads the voucher to verify this even occurred +// in a retrieval market deal, then, based on the data transfer event that occurred, it dispatches +// an event to the appropriate state machine +func ClientDataTransferSubscriber(deals EventReceiver) datatransfer.Subscriber { + return func(event datatransfer.Event, channelState datatransfer.ChannelState) { + dealProposal, ok := dealProposalFromVoucher(channelState.Voucher()) + + // if this event is for a transfer not related to retrieval, ignore + if !ok { + return + } + + retrievalEvent, params := clientEvent(event, channelState) + + if retrievalEvent == noEvent { + return + } + log.Debugw("processing retrieval client dt event", "event", datatransfer.Events[event.Code], "dealID", dealProposal.ID, "peer", channelState.OtherPeer(), "retrievalEvent", rm.ClientEvents[retrievalEvent]) + + // data transfer events for progress do not affect deal state + err := deals.Send(dealProposal.ID, retrievalEvent, params...) 
+ if err != nil { + log.Errorf("processing dt event %s for state %s: %s", + datatransfer.Events[event.Code], datatransfer.Statuses[channelState.Status()], err) + } + } +} + +// StoreGetter retrieves the store for a given id +type StoreGetter interface { + Get(otherPeer peer.ID, dealID rm.DealID) (bstore.Blockstore, error) +} + +// StoreConfigurableTransport defines the methods needed to +// configure a data transfer transport use a unique store for a given request +type StoreConfigurableTransport interface { + UseStore(datatransfer.ChannelID, ipld.LinkSystem) error +} + +// TransportConfigurer configurers the graphsync transport to use a custom blockstore per deal +func TransportConfigurer(thisPeer peer.ID, storeGetter StoreGetter) datatransfer.TransportConfigurer { + return func(channelID datatransfer.ChannelID, voucher datatransfer.Voucher, transport datatransfer.Transport) { + dealProposal, ok := dealProposalFromVoucher(voucher) + if !ok { + return + } + gsTransport, ok := transport.(StoreConfigurableTransport) + if !ok { + return + } + otherPeer := channelID.OtherParty(thisPeer) + store, err := storeGetter.Get(otherPeer, dealProposal.ID) + if err != nil { + log.Errorf("attempting to configure data store: %s", err) + return + } + if store == nil { + return + } + err = gsTransport.UseStore(channelID, storeutil.LinkSystemForBlockstore(store)) + if err != nil { + log.Errorf("attempting to configure data store: %s", err) + } + } +} + +func dealProposalFromVoucher(voucher datatransfer.Voucher) (*rm.DealProposal, bool) { + dealProposal, ok := voucher.(*rm.DealProposal) + // if this event is for a transfer not related to storage, ignore + if ok { + return dealProposal, true + } + + legacyProposal, ok := voucher.(*migrations.DealProposal0) + if !ok { + return nil, false + } + newProposal := migrations.MigrateDealProposal0To1(*legacyProposal) + return &newProposal, true +} + +func dealResponseFromVoucherResult(vres datatransfer.VoucherResult) (*rm.DealResponse, bool) { + 
dealResponse, ok := vres.(*rm.DealResponse) + // if this event is for a transfer not related to storage, ignore + if ok { + return dealResponse, true + } + + legacyResponse, ok := vres.(*migrations.DealResponse0) + if !ok { + return nil, false + } + newResponse := migrations.MigrateDealResponse0To1(*legacyResponse) + return &newResponse, true +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/dtutils/dtutils_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/dtutils/dtutils_test.go new file mode 100644 index 00000000000..b439994b84a --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/dtutils/dtutils_test.go @@ -0,0 +1,507 @@ +package dtutils_test + +import ( + "context" + "errors" + "math/rand" + "testing" + + ds "github.com/ipfs/go-datastore" + bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipld/go-ipld-prime" + peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/dtutils" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestProviderDataTransferSubscriber(t *testing.T) { + dealProposal := shared_testutil.MakeTestDealProposal() + legacyProposal := migrations.DealProposal0{ + PayloadCID: dealProposal.PayloadCID, + ID: dealProposal.ID, + Params0: migrations.Params0{ + Selector: dealProposal.Selector, + PieceCID: dealProposal.PieceCID, + PricePerByte: dealProposal.PricePerByte, + PaymentInterval: dealProposal.PaymentInterval, + PaymentIntervalIncrease: dealProposal.PaymentIntervalIncrease, + UnsealPrice: dealProposal.UnsealPrice, + }, + } + testPeers := 
shared_testutil.GeneratePeers(2)
	transferID := datatransfer.TransferID(rand.Uint64())
	tests := map[string]struct {
		code          datatransfer.EventCode
		message       string
		state         shared_testutil.TestChannelParams
		ignored       bool
		expectedID    interface{}
		expectedEvent fsm.EventName
		expectedArgs  []interface{}
	}{
		"not a retrieval voucher": {
			ignored: true,
		},
		"accept": {
			code: datatransfer.Accept,
			state: shared_testutil.TestChannelParams{
				IsPull:     true,
				TransferID: transferID,
				Sender:     testPeers[0],
				Recipient:  testPeers[1],
				Vouchers:   []datatransfer.Voucher{&dealProposal},
				Status:     datatransfer.Ongoing},
			expectedID:    rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: testPeers[1]},
			expectedEvent: rm.ProviderEventDealAccepted,
			expectedArgs:  []interface{}{datatransfer.ChannelID{ID: transferID, Initiator: testPeers[1], Responder: testPeers[0]}},
		},
		"accept, legacy": {
			code: datatransfer.Accept,
			state: shared_testutil.TestChannelParams{
				IsPull:     true,
				TransferID: transferID,
				Sender:     testPeers[0],
				Recipient:  testPeers[1],
				Vouchers:   []datatransfer.Voucher{&legacyProposal},
				Status:     datatransfer.Ongoing},
			expectedID:    rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: testPeers[1]},
			expectedEvent: rm.ProviderEventDealAccepted,
			expectedArgs:  []interface{}{datatransfer.ChannelID{ID: transferID, Initiator: testPeers[1], Responder: testPeers[0]}},
		},
		"error": {
			code:    datatransfer.Error,
			message: "something went wrong",
			state: shared_testutil.TestChannelParams{
				IsPull:     true,
				TransferID: transferID,
				Sender:     testPeers[0],
				Recipient:  testPeers[1],
				Vouchers:   []datatransfer.Voucher{&dealProposal},
				Status:     datatransfer.Ongoing},
			expectedID:    rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: testPeers[1]},
			expectedEvent: rm.ProviderEventDataTransferError,
			expectedArgs:  []interface{}{errors.New("deal data transfer failed: something went wrong")},
		},
		"disconnected": {
			code:    datatransfer.Disconnected,
			message: "something went wrong",
			state: shared_testutil.TestChannelParams{
				IsPull:     true,
				TransferID: transferID,
				Sender:     testPeers[0],
				Recipient:  testPeers[1],
				Vouchers:   []datatransfer.Voucher{&dealProposal},
				Status:     datatransfer.Ongoing},
			expectedID:    rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: testPeers[1]},
			expectedEvent: rm.ProviderEventDataTransferError,
			expectedArgs:  []interface{}{errors.New("deal data transfer stalled (peer hungup)")},
		},
		"completed": {
			code: datatransfer.ResumeResponder,
			state: shared_testutil.TestChannelParams{
				IsPull:     true,
				TransferID: transferID,
				Sender:     testPeers[0],
				Recipient:  testPeers[1],
				Vouchers:   []datatransfer.Voucher{&dealProposal},
				Status:     datatransfer.Completed},
			expectedID:    rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: testPeers[1]},
			expectedEvent: rm.ProviderEventComplete,
		},
		"cancel": {
			code: datatransfer.Cancel,
			state: shared_testutil.TestChannelParams{
				IsPull:     true,
				TransferID: transferID,
				Sender:     testPeers[0],
				Recipient:  testPeers[1],
				Vouchers:   []datatransfer.Voucher{&dealProposal},
				Status:     datatransfer.Completed},
			expectedID:    rm.ProviderDealIdentifier{DealID: dealProposal.ID, Receiver: testPeers[1]},
			expectedEvent: rm.ProviderEventClientCancelled,
		},
	}
	for test, data := range tests {
		t.Run(test, func(t *testing.T) {
			fdg := &fakeDealGroup{}
			subscriber := dtutils.ProviderDataTransferSubscriber(fdg)
			subscriber(datatransfer.Event{Code: data.code, Message: data.message}, shared_testutil.NewTestChannel(data.state))
			if !data.ignored {
				require.True(t, fdg.called)
				require.Equal(t, fdg.lastID, data.expectedID)
				require.Equal(t, fdg.lastEvent, data.expectedEvent)
				require.Equal(t, fdg.lastArgs, data.expectedArgs)
			} else {
				require.False(t, fdg.called)
			}
		})
	}

}
func TestClientDataTransferSubscriber(t *testing.T) {
	dealProposal := shared_testutil.MakeTestDealProposal()
legacyProposal := migrations.DealProposal0{
		PayloadCID: dealProposal.PayloadCID,
		ID:         dealProposal.ID,
		Params0: migrations.Params0{
			Selector:                dealProposal.Selector,
			PieceCID:                dealProposal.PieceCID,
			PricePerByte:            dealProposal.PricePerByte,
			PaymentInterval:         dealProposal.PaymentInterval,
			PaymentIntervalIncrease: dealProposal.PaymentIntervalIncrease,
			UnsealPrice:             dealProposal.UnsealPrice,
		},
	}
	paymentOwed := shared_testutil.MakeTestTokenAmount()
	tests := map[string]struct {
		code          datatransfer.EventCode
		message       string
		state         shared_testutil.TestChannelParams
		ignored       bool
		expectedID    interface{}
		expectedEvent fsm.EventName
		expectedArgs  []interface{}
	}{
		"not a retrieval voucher": {
			ignored: true,
		},
		"progress": {
			code: datatransfer.DataReceivedProgress,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				Status:   datatransfer.Ongoing,
				Received: 1000},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventBlocksReceived,
			expectedArgs:  []interface{}{uint64(1000)},
		},
		"finish transfer": {
			code: datatransfer.FinishTransfer,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				Status:   datatransfer.TransferFinished},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventAllBlocksReceived,
		},
		"cancel": {
			code: datatransfer.Cancel,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				Status:   datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventProviderCancelled,
		},
		"new voucher result - rejected": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				VoucherResults: []datatransfer.VoucherResult{&retrievalmarket.DealResponse{
					Status:  retrievalmarket.DealStatusRejected,
					ID:      dealProposal.ID,
					Message: "something went wrong",
				}},
				Status: datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventDealRejected,
			expectedArgs:  []interface{}{"something went wrong"},
		},
		"new voucher result - not found": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				VoucherResults: []datatransfer.VoucherResult{&retrievalmarket.DealResponse{
					Status:  retrievalmarket.DealStatusDealNotFound,
					ID:      dealProposal.ID,
					Message: "something went wrong",
				}},
				Status: datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventDealNotFound,
			expectedArgs:  []interface{}{"something went wrong"},
		},
		"new voucher result - accepted": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				VoucherResults: []datatransfer.VoucherResult{&retrievalmarket.DealResponse{
					Status: retrievalmarket.DealStatusAccepted,
					ID:     dealProposal.ID,
				}},
				Status: datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventDealAccepted,
		},
		"new voucher result - accepted, legacy": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&legacyProposal},
				VoucherResults: []datatransfer.VoucherResult{&migrations.DealResponse0{
					Status: retrievalmarket.DealStatusAccepted,
					ID:     dealProposal.ID,
				}},
				Status: datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventDealAccepted,
		},
		"new voucher result - funds needed last payment": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				VoucherResults: []datatransfer.VoucherResult{&retrievalmarket.DealResponse{
					Status:      retrievalmarket.DealStatusFundsNeededLastPayment,
					ID:          dealProposal.ID,
					PaymentOwed: paymentOwed,
				}},
				Status: datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventLastPaymentRequested,
			expectedArgs:  []interface{}{paymentOwed},
		},
		"new voucher result - completed": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				VoucherResults: []datatransfer.VoucherResult{&retrievalmarket.DealResponse{
					Status: retrievalmarket.DealStatusCompleted,
					ID:     dealProposal.ID,
				}},
				Status: datatransfer.ResponderCompleted},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventComplete,
		},
		"new voucher result - funds needed": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				VoucherResults: []datatransfer.VoucherResult{&retrievalmarket.DealResponse{
					Status:      retrievalmarket.DealStatusFundsNeeded,
					ID:          dealProposal.ID,
					PaymentOwed: paymentOwed,
				}},
				Status: datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventPaymentRequested,
			expectedArgs:  []interface{}{paymentOwed},
		},
		"new voucher result - unexpected response": {
			code: datatransfer.NewVoucherResult,
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				VoucherResults: []datatransfer.VoucherResult{&retrievalmarket.DealResponse{
					Status: retrievalmarket.DealStatusPaymentChannelAddingFunds,
					ID:     dealProposal.ID,
				}},
				Status: datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventUnknownResponseReceived,
			expectedArgs:  []interface{}{retrievalmarket.DealStatusPaymentChannelAddingFunds},
		},
		"error": {
			code:    datatransfer.Error,
			message: "something went wrong",
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				Status:   datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventDataTransferError,
			expectedArgs:  []interface{}{errors.New("deal data transfer failed: something went wrong")},
		},
		"disconnected": {
			code:    datatransfer.Disconnected,
			message: "something went wrong",
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				Status:   datatransfer.Ongoing},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventDataTransferError,
			expectedArgs:  []interface{}{errors.New("deal data transfer stalled (peer hungup)")},
		},
		"error, response rejected": {
			code:    datatransfer.Error,
			message: datatransfer.ErrRejected.Error(),
			state: shared_testutil.TestChannelParams{
				Vouchers: []datatransfer.Voucher{&dealProposal},
				Status:   datatransfer.Ongoing,
				Message:  datatransfer.ErrRejected.Error()},
			expectedID:    dealProposal.ID,
			expectedEvent: rm.ClientEventDealRejected,
			expectedArgs:  []interface{}{"rejected for unknown reasons"},
		},
	}
	for test, data := range tests {
		t.Run(test, func(t *testing.T) {
			fdg := &fakeDealGroup{}
			subscriber := dtutils.ClientDataTransferSubscriber(fdg)
			subscriber(datatransfer.Event{Code: data.code, Message: data.message}, shared_testutil.NewTestChannel(data.state))
			if !data.ignored {
				require.True(t, fdg.called)
				require.Equal(t, fdg.lastID, data.expectedID)
				require.Equal(t, fdg.lastEvent, data.expectedEvent)
				require.Equal(t, fdg.lastArgs, data.expectedArgs)
			} else {
				require.False(t, fdg.called)
			}
		})
	}
}

// fakeDealGroup records the last event sent through it so tests can assert on
// what the subscriber dispatched.
type fakeDealGroup struct {
	returnedErr error
	called      bool
	lastID      interface{}
	lastEvent   fsm.EventName
	lastArgs    []interface{}
}

func (fdg *fakeDealGroup) Send(id interface{}, name fsm.EventName, args ...interface{}) (err error) {
	fdg.lastID = id
	fdg.lastEvent = name
	fdg.lastArgs = args
	fdg.called = true
	return fdg.returnedErr
}

func TestTransportConfigurer(t *testing.T) {
	payloadCID := shared_testutil.GenerateCids(1)[0]
	expectedChannelID := shared_testutil.MakeTestChannelID()
	expectedDealID := rm.DealID(rand.Uint64())
	thisPeer := expectedChannelID.Initiator
expectedPeer := expectedChannelID.Responder + + testCases := map[string]struct { + voucher datatransfer.Voucher + transport datatransfer.Transport + returnedStore bstore.Blockstore + returnedStoreErr error + getterCalled bool + useStoreCalled bool + }{ + "non-storage voucher": { + voucher: nil, + getterCalled: false, + }, + "non-configurable transport": { + voucher: &rm.DealProposal{ + PayloadCID: payloadCID, + ID: expectedDealID, + }, + transport: &fakeTransport{}, + getterCalled: false, + }, + "store getter errors": { + voucher: &rm.DealProposal{ + PayloadCID: payloadCID, + ID: expectedDealID, + }, + transport: &fakeGsTransport{Transport: &fakeTransport{}}, + getterCalled: true, + useStoreCalled: false, + returnedStore: nil, + returnedStoreErr: errors.New("something went wrong"), + }, + "store getter succeeds": { + voucher: &rm.DealProposal{ + PayloadCID: payloadCID, + ID: expectedDealID, + }, + transport: &fakeGsTransport{Transport: &fakeTransport{}}, + getterCalled: true, + useStoreCalled: true, + returnedStore: bstore.NewBlockstore(ds.NewMapDatastore()), + returnedStoreErr: nil, + }, + "store getter succeeds, legacy": { + voucher: &migrations.DealProposal0{ + PayloadCID: payloadCID, + ID: expectedDealID, + }, + transport: &fakeGsTransport{Transport: &fakeTransport{}}, + getterCalled: true, + useStoreCalled: true, + returnedStore: bstore.NewBlockstore(ds.NewMapDatastore()), + returnedStoreErr: nil, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + storeGetter := &fakeStoreGetter{returnedErr: data.returnedStoreErr, returnedStore: data.returnedStore} + transportConfigurer := dtutils.TransportConfigurer(thisPeer, storeGetter) + transportConfigurer(expectedChannelID, data.voucher, data.transport) + if data.getterCalled { + require.True(t, storeGetter.called) + require.Equal(t, expectedDealID, storeGetter.lastDealID) + require.Equal(t, expectedPeer, storeGetter.lastOtherPeer) + fgt, ok := data.transport.(*fakeGsTransport) + 
require.True(t, ok) + if data.useStoreCalled { + require.True(t, fgt.called) + require.Equal(t, expectedChannelID, fgt.lastChannelID) + } else { + require.False(t, fgt.called) + } + } else { + require.False(t, storeGetter.called) + } + }) + } +} + +type fakeStoreGetter struct { + lastDealID rm.DealID + lastOtherPeer peer.ID + returnedErr error + returnedStore bstore.Blockstore + called bool +} + +func (fsg *fakeStoreGetter) Get(otherPeer peer.ID, dealID rm.DealID) (bstore.Blockstore, error) { + fsg.lastDealID = dealID + fsg.lastOtherPeer = otherPeer + fsg.called = true + return fsg.returnedStore, fsg.returnedErr +} + +type fakeTransport struct{} + +var _ datatransfer.Transport = (*fakeTransport)(nil) + +func (ft *fakeTransport) OpenChannel(ctx context.Context, dataSender peer.ID, channelID datatransfer.ChannelID, root ipld.Link, stor ipld.Node, channel datatransfer.ChannelState, msg datatransfer.Message) error { + return nil +} + +func (ft *fakeTransport) CloseChannel(ctx context.Context, chid datatransfer.ChannelID) error { + return nil +} + +func (ft *fakeTransport) SetEventHandler(events datatransfer.EventsHandler) error { + return nil +} + +func (ft *fakeTransport) CleanupChannel(chid datatransfer.ChannelID) { +} + +func (ft *fakeTransport) Shutdown(context.Context) error { + return nil +} + +type fakeGsTransport struct { + datatransfer.Transport + lastChannelID datatransfer.ChannelID + lastLinkSystem ipld.LinkSystem + called bool +} + +func (fgt *fakeGsTransport) UseStore(channelID datatransfer.ChannelID, lsys ipld.LinkSystem) error { + fgt.lastChannelID = channelID + fgt.lastLinkSystem = lsys + fgt.called = true + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/fixtures/lorem.txt b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/fixtures/lorem.txt new file mode 100644 index 00000000000..fd4a2f3c1ff --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/fixtures/lorem.txt @@ -0,0 +1,49 @@ 
+Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. + +Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. + +Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. 
+ +Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus. + +Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. + +Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. 
Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. + +Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. + +Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. 
Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. + +Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. + +Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. 
At augue eget arcu dictum varius duis at consectetur lorem. + +Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. + +Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. + +Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. 
Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. + +Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. + +Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. 
Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. + +Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. + +Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. + +Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. 
Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. + +Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. + +Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. + +Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. 
Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. + +Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. + +Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat. + +Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. 
Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum. + +Eros in cursus turpis massa tincidunt dui ut. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed. \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/fixtures/lorem_under_1_block.txt b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/fixtures/lorem_under_1_block.txt new file mode 100644 index 00000000000..8a5b223df62 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/fixtures/lorem_under_1_block.txt @@ -0,0 +1 @@ +Aliquam sollicitudin diam non pellentesque eleifend. Phasellus at mauris id est interdum feugiat. Morbi lobortis quam eget nulla pulvinar, ac maximus dui consequat. Donec ut mauris faucibus nulla finibus cursus. In iaculis, est vitae viverra dignissim, sem nulla hendrerit augue, sed mollis magna libero in odio. 
Morbi interdum lacus pellentesque pulvinar bibendum. Cras ac ultrices tortor, nec lobortis lorem. \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/integration_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/integration_test.go new file mode 100644 index 00000000000..679a269d361 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/integration_test.go @@ -0,0 +1,741 @@ +package retrievalimpl_test + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + graphsyncimpl "github.com/ipfs/go-graphsync/impl" + "github.com/ipfs/go-graphsync/network" + "github.com/ipld/go-car" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + dtimpl "github.com/filecoin-project/go-data-transfer/impl" + "github.com/filecoin-project/go-data-transfer/testutil" + dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + rmtesting 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/testing" + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/stores" +) + +func TestClientCanMakeQueryToProvider(t *testing.T) { + bgCtx := context.Background() + payChAddr := address.TestAddress + + client, expectedCIDs, missingPiece, expectedQR, retrievalPeer, _, pieceStore := requireSetupTestClientAndProvider(bgCtx, t, payChAddr) + + t.Run("when piece is found, returns piece and price data", func(t *testing.T) { + expectedQR.Status = retrievalmarket.QueryResponseAvailable + actualQR, err := client.Query(bgCtx, retrievalPeer, expectedCIDs[0], retrievalmarket.QueryParams{}) + + assert.NoError(t, err) + assert.Equal(t, expectedQR, actualQR) + }) + + t.Run("when piece is not found, returns unavailable", func(t *testing.T) { + expectedQR.PieceCIDFound = retrievalmarket.QueryItemUnavailable + expectedQR.Status = retrievalmarket.QueryResponseUnavailable + expectedQR.Message = "piece info for cid not found (deal has not been added to a piece yet)" + expectedQR.Size = 0 + actualQR, err := client.Query(bgCtx, retrievalPeer, missingPiece, retrievalmarket.QueryParams{}) + actualQR.MaxPaymentInterval = expectedQR.MaxPaymentInterval + actualQR.MinPricePerByte = expectedQR.MinPricePerByte + actualQR.MaxPaymentIntervalIncrease = expectedQR.MaxPaymentIntervalIncrease + actualQR.UnsealPrice = expectedQR.UnsealPrice + assert.NoError(t, err) + assert.Equal(t, expectedQR, actualQR) + }) + + t.Run("when there is some other error, returns error", func(t *testing.T) { + pieceStore.ReturnErrorFromGetPieceInfo(xerrors.Errorf("someerr")) + expectedQR.Status = retrievalmarket.QueryResponseError + expectedQR.PieceCIDFound = retrievalmarket.QueryItemUnavailable + expectedQR.Size = 0 + expectedQR.Message = "failed to fetch piece to retrieve from: could not locate piece: someerr" + actualQR, err := client.Query(bgCtx, retrievalPeer, expectedCIDs[0], 
retrievalmarket.QueryParams{}) + assert.NoError(t, err) + actualQR.MaxPaymentInterval = expectedQR.MaxPaymentInterval + actualQR.MinPricePerByte = expectedQR.MinPricePerByte + actualQR.MaxPaymentIntervalIncrease = expectedQR.MaxPaymentIntervalIncrease + actualQR.UnsealPrice = expectedQR.UnsealPrice + assert.Equal(t, expectedQR, actualQR) + }) + +} + +func TestProvider_Stop(t *testing.T) { + if testing.Short() { + t.Skip() + } + bgCtx := context.Background() + payChAddr := address.TestAddress + client, expectedCIDs, _, _, retrievalPeer, provider, _ := requireSetupTestClientAndProvider(bgCtx, t, payChAddr) + require.NoError(t, provider.Stop()) + _, err := client.Query(bgCtx, retrievalPeer, expectedCIDs[0], retrievalmarket.QueryParams{}) + + assert.EqualError(t, err, "exhausted 5 attempts but failed to open stream, err: protocol not supported") +} + +func requireSetupTestClientAndProvider(ctx context.Context, t *testing.T, payChAddr address.Address) ( + retrievalmarket.RetrievalClient, + []cid.Cid, + cid.Cid, + retrievalmarket.QueryResponse, + retrievalmarket.RetrievalPeer, + retrievalmarket.RetrievalProvider, + *tut.TestPieceStore, +) { + testData := tut.NewLibp2pTestData(ctx, t) + nw1 := rmnet.NewFromLibp2pHost(testData.Host1, rmnet.RetryParameters(100*time.Millisecond, 1*time.Second, 5, 5)) + cids := tut.GenerateCids(2) + rcNode1 := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{ + PayCh: payChAddr, + CreatePaychCID: cids[0], + AddFundsCID: cids[1], + }) + + gs1 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host1), testData.LinkSystem1) + dtTransport1 := dtgstransport.NewTransport(testData.Host1.ID(), gs1) + dt1, err := dtimpl.NewDataTransfer(testData.DTStore1, testData.DTNet1, dtTransport1) + require.NoError(t, err) + testutil.StartAndWaitForReady(ctx, t, dt1) + require.NoError(t, err) + clientDs := namespace.Wrap(testData.Ds1, datastore.NewKey("/retrievals/client")) + ba := tut.NewTestRetrievalBlockstoreAccessor() + 
client, err := retrievalimpl.NewClient(nw1, dt1, rcNode1, &tut.TestPeerResolver{}, clientDs, ba) + require.NoError(t, err) + tut.StartAndWaitForReady(ctx, t, client) + nw2 := rmnet.NewFromLibp2pHost(testData.Host2, rmnet.RetryParameters(0, 0, 0, 0)) + providerNode := testnodes.NewTestRetrievalProviderNode() + sectorAccessor := testnodes.NewTestSectorAccessor() + pieceStore := tut.NewTestPieceStore() + expectedCIDs := tut.GenerateCids(3) + expectedPieceCIDs := tut.GenerateCids(3) + missingCID := tut.GenerateCids(1)[0] + expectedQR := tut.MakeTestQueryResponse() + dagstoreWrapper := tut.NewMockDagStoreWrapper(pieceStore, sectorAccessor) + + pieceStore.ExpectMissingCID(missingCID) + for i, c := range expectedCIDs { + pieceStore.ExpectCID(c, piecestore.CIDInfo{ + PieceBlockLocations: []piecestore.PieceBlockLocation{ + { + PieceCID: expectedPieceCIDs[i], + }, + }, + }) + dagstoreWrapper.AddBlockToPieceIndex(c, expectedPieceCIDs[i]) + } + for i, piece := range expectedPieceCIDs { + pieceStore.ExpectPiece(piece, piecestore.PieceInfo{ + Deals: []piecestore.DealInfo{ + { + Length: abi.PaddedPieceSize(expectedQR.Size * uint64(i+1)), + }, + }, + }) + } + + paymentAddress := address.TestAddress2 + + gs2 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host2), testData.LinkSystem2) + dtTransport2 := dtgstransport.NewTransport(testData.Host2.ID(), gs2) + dt2, err := dtimpl.NewDataTransfer(testData.DTStore2, testData.DTNet2, dtTransport2) + require.NoError(t, err) + testutil.StartAndWaitForReady(ctx, t, dt2) + require.NoError(t, err) + providerDs := namespace.Wrap(testData.Ds2, datastore.NewKey("/retrievals/provider")) + + priceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := retrievalmarket.Ask{} + ask.PaymentInterval = expectedQR.MaxPaymentInterval + ask.PaymentIntervalIncrease = expectedQR.MaxPaymentIntervalIncrease + ask.PricePerByte = expectedQR.MinPricePerByte + ask.UnsealPrice = 
expectedQR.UnsealPrice + return ask, nil + } + + provider, err := retrievalimpl.NewProvider( + paymentAddress, providerNode, sectorAccessor, nw2, pieceStore, dagstoreWrapper, dt2, providerDs, + priceFunc) + require.NoError(t, err) + + tut.StartAndWaitForReady(ctx, t, provider) + retrievalPeer := retrievalmarket.RetrievalPeer{ + Address: paymentAddress, + ID: testData.Host2.ID(), + } + rcNode1.ExpectKnownAddresses(retrievalPeer, nil) + + expectedQR.Size = uint64(abi.PaddedPieceSize(expectedQR.Size).Unpadded()) + + return client, expectedCIDs, missingCID, expectedQR, retrievalPeer, provider, pieceStore +} + +func TestClientCanMakeDealWithProvider(t *testing.T) { + // -------- SET UP PROVIDER + + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + + partialSelector := ssb.ExploreFields(func(specBuilder builder.ExploreFieldsSpecBuilder) { + specBuilder.Insert("Links", ssb.ExploreIndex(0, ssb.ExploreFields(func(specBuilder builder.ExploreFieldsSpecBuilder) { + specBuilder.Insert("Hash", ssb.Matcher()) + }))) + }).Node() + + var customDeciderRan bool + + testCases := []struct { + name string + decider retrievalimpl.DealDecider + filename string + filesize uint64 + voucherAmts []abi.TokenAmount + selector ipld.Node + unsealPrice abi.TokenAmount + zeroPricePerByte bool + paramsV1, addFunds bool + skipStores bool + failsUnseal bool + paymentInterval uint64 + paymentIntervalIncrease uint64 + channelAvailableFunds retrievalmarket.ChannelAvailableFunds + fundsReplenish abi.TokenAmount + cancelled bool + disableNewDeals bool + }{ + {name: "1 block file retrieval succeeds", + filename: "lorem_under_1_block.txt", + filesize: 410, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, + addFunds: false, + }, + {name: "1 block file retrieval succeeds with unseal price", + filename: "lorem_under_1_block.txt", + filesize: 410, + unsealPrice: abi.NewTokenAmount(100), + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(100), abi.NewTokenAmount(410100)}, + selector: 
selectorparse.CommonSelector_ExploreAllRecursively, + paramsV1: true, + }, + {name: "1 block file retrieval succeeds with existing payment channel", + filename: "lorem_under_1_block.txt", + filesize: 410, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, + addFunds: true}, + {name: "1 block file retrieval succeeds, but waits for other payment channel funds to land", + filename: "lorem_under_1_block.txt", + filesize: 410, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, + channelAvailableFunds: retrievalmarket.ChannelAvailableFunds{ + // this is bit contrived, but we're simulating other deals expending the funds by setting the initial confirmed to negative + // when funds get added on initial create, it will reset to zero + // which will trigger a later voucher shortfall and then waiting for both + // the pending and then the queued amounts + ConfirmedAmt: abi.NewTokenAmount(-410000), + PendingAmt: abi.NewTokenAmount(200000), + PendingWaitSentinel: &tut.GenerateCids(1)[0], + QueuedAmt: abi.NewTokenAmount(210000), + }, + }, + {name: "1 block file retrieval succeeds, after insufficient funds and restart", + filename: "lorem_under_1_block.txt", + filesize: 410, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, + channelAvailableFunds: retrievalmarket.ChannelAvailableFunds{ + // this is bit contrived, but we're simulating other deals expending the funds by setting the initial confirmed to negative + // when funds get added on initial create, it will reset to zero + // which will trigger a later voucher shortfall + ConfirmedAmt: abi.NewTokenAmount(-410000), + }, + fundsReplenish: abi.NewTokenAmount(410000), + }, + {name: "1 block file retrieval cancelled after insufficient funds", + filename: "lorem_under_1_block.txt", + filesize: 410, + voucherAmts: []abi.TokenAmount{}, + channelAvailableFunds: retrievalmarket.ChannelAvailableFunds{ + // this is bit contrived, but we're simulating other deals expending the funds by setting the 
initial confirmed to negative + // when funds get added on initial create, it will reset to zero + // which will trigger a later voucher shortfall + ConfirmedAmt: abi.NewTokenAmount(-410000), + }, + cancelled: true, + }, + {name: "multi-block file retrieval succeeds", + filename: "lorem.txt", + filesize: 19000, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10174000), abi.NewTokenAmount(19958000)}, + }, + {name: "multi-block file retrieval with zero price per byte succeeds", + filename: "lorem.txt", + filesize: 19000, + zeroPricePerByte: true, + }, + {name: "multi-block file retrieval succeeds with V1 params and AllSelector", + filename: "lorem.txt", + filesize: 19000, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10174000), abi.NewTokenAmount(19958000)}, + paramsV1: true, + selector: selectorparse.CommonSelector_ExploreAllRecursively}, + {name: "partial file retrieval succeeds with V1 params and selector recursion depth 1", + filename: "lorem.txt", + filesize: 1024, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(1982000)}, + paramsV1: true, + selector: partialSelector}, + {name: "succeeds when using a custom decider function", + decider: func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { + customDeciderRan = true + return true, "", nil + }, + filename: "lorem_under_1_block.txt", + filesize: 410, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, + }, + {name: "succeeds for regular blockstore", + filename: "lorem.txt", + filesize: 19000, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10174000), abi.NewTokenAmount(19958000)}, + skipStores: true, + }, + { + name: "failed unseal", + filename: "lorem.txt", + filesize: 19000, + voucherAmts: []abi.TokenAmount{}, + failsUnseal: true, + }, + + {name: "multi-block file retrieval succeeds, final block exceeds payment interval", + filename: "lorem.txt", + filesize: 19000, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(9150000), 
abi.NewTokenAmount(19390000), abi.NewTokenAmount(19958000)}, + paymentInterval: 9000, + paymentIntervalIncrease: 1250, + }, + + {name: "multi-block file retrieval succeeds, final block lands on payment interval", + filename: "lorem.txt", + filesize: 19000, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(9150000), abi.NewTokenAmount(19958000)}, + // Total bytes: 19,920 + // intervals: 9,000 | 9,000 + (9,000 + 1920) + paymentInterval: 9000, + paymentIntervalIncrease: 1920, + }, + + {name: "multi-block file retrieval succeeds, with provider only accepting legacy deals", + filename: "lorem.txt", + filesize: 19000, + disableNewDeals: true, + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10174000), abi.NewTokenAmount(19958000)}, + }, + } + + for i, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + bgCtx := context.Background() + clientPaymentChannel, err := address.NewIDAddress(uint64(i * 10)) + require.NoError(t, err) + + testData := tut.NewLibp2pTestData(bgCtx, t) + + // Create a CARv2 file from a fixture + fpath := filepath.Join(tut.ThisDir(t), "./fixtures/"+testCase.filename) + pieceLink, path := testData.LoadUnixFSFileToStore(t, fpath) + c, ok := pieceLink.(cidlink.Link) + require.True(t, ok) + payloadCID := c.Cid + + // Get the CARv1 payload of the UnixFS DAG that the (Filestore backed by the CARv2) contains. 
+ carFile, err := os.CreateTemp(t.TempDir(), "rand") + require.NoError(t, err) + + fs, err := stores.ReadOnlyFilestore(path) + require.NoError(t, err) + + sc := car.NewSelectiveCar(bgCtx, fs, []car.Dag{{Root: payloadCID, Selector: selectorparse.CommonSelector_ExploreAllRecursively}}) + prepared, err := sc.Prepare() + require.NoError(t, err) + carBuf := new(bytes.Buffer) + require.NoError(t, prepared.Dump(context.TODO(), carBuf)) + carDataBuf := new(bytes.Buffer) + tr := io.TeeReader(carBuf, carDataBuf) + require.NoError(t, fs.Close()) + _, err = io.Copy(carFile, tr) + require.NoError(t, err) + require.NoError(t, carFile.Close()) + carData := carDataBuf.Bytes() + + // Set up retrieval parameters + providerPaymentAddr, err := address.NewIDAddress(uint64(i * 99)) + require.NoError(t, err) + paymentInterval := testCase.paymentInterval + if paymentInterval == 0 { + paymentInterval = uint64(10000) + } + paymentIntervalIncrease := testCase.paymentIntervalIncrease + if paymentIntervalIncrease == 0 { + paymentIntervalIncrease = uint64(1000) + } + pricePerByte := abi.NewTokenAmount(1000) + if testCase.zeroPricePerByte { + pricePerByte = abi.NewTokenAmount(0) + } + unsealPrice := testCase.unsealPrice + if unsealPrice.Int == nil { + unsealPrice = big.Zero() + } + + expectedQR := retrievalmarket.QueryResponse{ + Size: 1024, + PaymentAddress: providerPaymentAddr, + MinPricePerByte: pricePerByte, + MaxPaymentInterval: paymentInterval, + MaxPaymentIntervalIncrease: paymentIntervalIncrease, + UnsealPrice: unsealPrice, + } + + // Set up the piece info that will be retrieved by the provider + // when the retrieval request is made + sectorID := abi.SectorNumber(100000) + offset := abi.PaddedPieceSize(1000) + pieceInfo := piecestore.PieceInfo{ + PieceCID: tut.GenerateCids(1)[0], + Deals: []piecestore.DealInfo{ + { + DealID: abi.DealID(100), + SectorID: sectorID, + Offset: offset, + Length: abi.UnpaddedPieceSize(len(carData)).Padded(), + }, + }, + } + providerNode := 
testnodes.NewTestRetrievalProviderNode() + providerNode.ExpectPricingParams(pieceInfo.PieceCID, []abi.DealID{100}) + + sectorAccessor := testnodes.NewTestSectorAccessor() + if testCase.failsUnseal { + sectorAccessor.ExpectFailedUnseal(sectorID, offset.Unpadded(), abi.UnpaddedPieceSize(len(carData))) + } else { + sectorAccessor.ExpectUnseal(sectorID, offset.Unpadded(), abi.UnpaddedPieceSize(len(carData)), carData) + } + + decider := rmtesting.TrivialTestDecider + if testCase.decider != nil { + decider = testCase.decider + } + + // ------- SET UP CLIENT + ctx, cancel := context.WithTimeout(bgCtx, 60*time.Second) + defer cancel() + + provider := setupProvider(bgCtx, t, testData, payloadCID, pieceInfo, carFile.Name(), expectedQR, + providerPaymentAddr, providerNode, sectorAccessor, decider, testCase.disableNewDeals) + tut.StartAndWaitForReady(ctx, t, provider) + + retrievalPeer := retrievalmarket.RetrievalPeer{Address: providerPaymentAddr, ID: testData.Host2.ID()} + + expectedVoucher := tut.MakeTestSignedVoucher() + + // just make sure there is enough to cover the transfer + expectedTotal := big.Mul(pricePerByte, abi.NewTokenAmount(int64(len(carData)))) + + // voucherAmts are pulled from the actual answer so the expected keys in the test node match up. + // later we compare the voucher values. 
The last voucherAmt is a remainder + proof := []byte("") + for _, voucherAmt := range testCase.voucherAmts { + require.NoError(t, providerNode.ExpectVoucher(clientPaymentChannel, expectedVoucher, proof, voucherAmt, voucherAmt, nil)) + } + + nw1 := rmnet.NewFromLibp2pHost(testData.Host1, rmnet.RetryParameters(0, 0, 0, 0)) + createdChan, newLaneAddr, createdVoucher, clientNode, client, ba, err := setupClient(bgCtx, t, clientPaymentChannel, expectedVoucher, nw1, testData, testCase.addFunds, testCase.channelAvailableFunds) + require.NoError(t, err) + tut.StartAndWaitForReady(ctx, t, client) + + clientNode.ExpectKnownAddresses(retrievalPeer, nil) + + clientDealStateChan := make(chan retrievalmarket.ClientDealState) + client.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) { + switch state.Status { + case retrievalmarket.DealStatusCompleted, retrievalmarket.DealStatusCancelled, retrievalmarket.DealStatusErrored: + clientDealStateChan <- state + return + } + if state.Status == retrievalmarket.DealStatusInsufficientFunds { + if !testCase.fundsReplenish.Nil() { + clientNode.ResetChannelAvailableFunds(retrievalmarket.ChannelAvailableFunds{ + ConfirmedAmt: testCase.fundsReplenish, + }) + client.TryRestartInsufficientFunds(state.PaymentInfo.PayCh) + } + if testCase.cancelled { + client.CancelDeal(state.ID) + } + } + msg := ` +Client: +Event: %s +Status: %s +TotalReceived: %d +BytesPaidFor: %d +CurrentInterval: %d +TotalFunds: %s +Message: %s +` + t.Logf(msg, retrievalmarket.ClientEvents[event], retrievalmarket.DealStatuses[state.Status], state.TotalReceived, state.BytesPaidFor, state.CurrentInterval, + state.TotalFunds.String(), state.Message) + }) + + providerDealStateChan := make(chan retrievalmarket.ProviderDealState) + provider.SubscribeToEvents(func(event retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) { + switch state.Status { + case retrievalmarket.DealStatusCompleted, 
retrievalmarket.DealStatusCancelled, retrievalmarket.DealStatusErrored: + providerDealStateChan <- state + return + } + msg := ` +Provider: +Event: %s +Status: %s +TotalSent: %d +FundsReceived: %s +Message: %s +CurrentInterval: %d +` + t.Logf(msg, retrievalmarket.ProviderEvents[event], retrievalmarket.DealStatuses[state.Status], state.TotalSent, state.FundsReceived.String(), state.Message, + state.CurrentInterval) + + }) + // **** Send the query for the Piece + // set up retrieval params + resp, err := client.Query(bgCtx, retrievalPeer, payloadCID, retrievalmarket.QueryParams{}) + require.NoError(t, err) + require.Equal(t, retrievalmarket.QueryResponseAvailable, resp.Status) + + var rmParams retrievalmarket.Params + if testCase.paramsV1 { + rmParams, err = retrievalmarket.NewParamsV1(pricePerByte, paymentInterval, paymentIntervalIncrease, testCase.selector, nil, unsealPrice) + require.NoError(t, err) + } else { + rmParams = retrievalmarket.NewParamsV0(pricePerByte, paymentInterval, paymentIntervalIncrease) + } + + // *** Retrieve the piece + _, err = client.Retrieve(bgCtx, 0, payloadCID, rmParams, expectedTotal, retrievalPeer, clientPaymentChannel, retrievalPeer.Address) + require.NoError(t, err) + + // verify that client subscribers will be notified of state changes + var clientDealState retrievalmarket.ClientDealState + select { + case <-ctx.Done(): + t.Error("deal never completed") + t.FailNow() + case clientDealState = <-clientDealStateChan: + } + if testCase.failsUnseal || testCase.cancelled { + assert.Equal(t, retrievalmarket.DealStatusCancelled, clientDealState.Status) + } else { + if !testCase.zeroPricePerByte { + assert.Equal(t, clientDealState.PaymentInfo.Lane, expectedVoucher.Lane) + require.NotNil(t, createdChan) + require.Equal(t, expectedTotal, createdChan.amt) + require.Equal(t, clientPaymentChannel, *newLaneAddr) + + // verify that the voucher was saved/seen by the client with correct values + require.NotNil(t, createdVoucher) + 
tut.TestVoucherEquality(t, createdVoucher, expectedVoucher) + } + assert.Equal(t, retrievalmarket.DealStatusCompleted, clientDealState.Status) + } + + ctxProv, cancelProv := context.WithTimeout(bgCtx, 10*time.Second) + defer cancelProv() + var providerDealState retrievalmarket.ProviderDealState + select { + case <-ctxProv.Done(): + t.Error("provider never saw completed deal") + t.FailNow() + case providerDealState = <-providerDealStateChan: + } + + if testCase.failsUnseal { + tut.AssertRetrievalDealState(t, retrievalmarket.DealStatusErrored, providerDealState.Status) + } else if testCase.cancelled { + tut.AssertRetrievalDealState(t, retrievalmarket.DealStatusCancelled, providerDealState.Status) + } else { + tut.AssertRetrievalDealState(t, retrievalmarket.DealStatusCompleted, providerDealState.Status) + } + // TODO this is terrible, but it's temporary until the test harness refactor + // in the resuming retrieval deals branch is done + // https://github.com/filecoin-project/go-fil-markets/issues/65 + if testCase.decider != nil { + assert.True(t, customDeciderRan) + } + // verify that the nodes we interacted with behaved as expected + clientNode.VerifyExpectations(t) + providerNode.VerifyExpectations(t) + sectorAccessor.VerifyExpectations(t) + if !testCase.failsUnseal && !testCase.cancelled { + testData.VerifyFileTransferredIntoStore(t, pieceLink, ba.Blockstore, testCase.filesize) + } + }) + } +} + +func setupClient( + ctx context.Context, + t *testing.T, + clientPaymentChannel address.Address, + expectedVoucher *paych.SignedVoucher, + nw1 rmnet.RetrievalMarketNetwork, + testData *tut.Libp2pTestData, + addFunds bool, + channelAvailableFunds retrievalmarket.ChannelAvailableFunds, +) ( + *pmtChan, + *address.Address, + *paych.SignedVoucher, + *testnodes.TestRetrievalClientNode, + retrievalmarket.RetrievalClient, + *tut.TestRetrievalBlockstoreAccessor, + error) { + var createdChan pmtChan + paymentChannelRecorder := func(client, miner address.Address, amt 
abi.TokenAmount) { + createdChan = pmtChan{client, miner, amt} + } + + var newLaneAddr address.Address + laneRecorder := func(paymentChannel address.Address) { + newLaneAddr = paymentChannel + } + + var createdVoucher paych.SignedVoucher + paymentVoucherRecorder := func(v *paych.SignedVoucher) { + createdVoucher = *v + } + cids := tut.GenerateCids(2) + clientNode := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{ + AddFundsOnly: addFunds, + PayCh: clientPaymentChannel, + Lane: expectedVoucher.Lane, + Voucher: expectedVoucher, + PaymentChannelRecorder: paymentChannelRecorder, + AllocateLaneRecorder: laneRecorder, + PaymentVoucherRecorder: paymentVoucherRecorder, + CreatePaychCID: cids[0], + AddFundsCID: cids[1], + IntegrationTest: true, + ChannelAvailableFunds: channelAvailableFunds, + }) + + gs1 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host1), testData.LinkSystem1) + dtTransport1 := dtgstransport.NewTransport(testData.Host1.ID(), gs1) + dt1, err := dtimpl.NewDataTransfer(testData.DTStore1, testData.DTNet1, dtTransport1) + require.NoError(t, err) + testutil.StartAndWaitForReady(ctx, t, dt1) + require.NoError(t, err) + clientDs := namespace.Wrap(testData.Ds1, datastore.NewKey("/retrievals/client")) + ba := tut.NewTestRetrievalBlockstoreAccessor() + + client, err := retrievalimpl.NewClient(nw1, dt1, clientNode, &tut.TestPeerResolver{}, clientDs, ba) + return &createdChan, &newLaneAddr, &createdVoucher, clientNode, client, ba, err +} + +func setupProvider( + ctx context.Context, + t *testing.T, + testData *tut.Libp2pTestData, + payloadCID cid.Cid, + pieceInfo piecestore.PieceInfo, + carFilePath string, + expectedQR retrievalmarket.QueryResponse, + providerPaymentAddr address.Address, + providerNode retrievalmarket.RetrievalProviderNode, + sectorAccessor retrievalmarket.SectorAccessor, + decider retrievalimpl.DealDecider, + disableNewDeals bool, +) retrievalmarket.RetrievalProvider { + nw2 := 
rmnet.NewFromLibp2pHost(testData.Host2, rmnet.RetryParameters(0, 0, 0, 0)) + pieceStore := tut.NewTestPieceStore() + expectedPiece := pieceInfo.PieceCID + cidInfo := piecestore.CIDInfo{ + PieceBlockLocations: []piecestore.PieceBlockLocation{ + { + PieceCID: expectedPiece, + }, + }, + } + pieceStore.ExpectCID(payloadCID, cidInfo) + pieceStore.ExpectPiece(expectedPiece, pieceInfo) + + gs2 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host2), testData.LinkSystem2) + dtTransport2 := dtgstransport.NewTransport(testData.Host2.ID(), gs2) + dt2, err := dtimpl.NewDataTransfer(testData.DTStore2, testData.DTNet2, dtTransport2) + require.NoError(t, err) + testutil.StartAndWaitForReady(ctx, t, dt2) + require.NoError(t, err) + providerDs := namespace.Wrap(testData.Ds2, datastore.NewKey("/retrievals/provider")) + + opts := []retrievalimpl.RetrievalProviderOption{retrievalimpl.DealDeciderOpt(decider)} + if disableNewDeals { + opts = append(opts, retrievalimpl.DisableNewDeals()) + } + + priceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := retrievalmarket.Ask{} + ask.PaymentInterval = expectedQR.MaxPaymentInterval + ask.PaymentIntervalIncrease = expectedQR.MaxPaymentIntervalIncrease + ask.PricePerByte = expectedQR.MinPricePerByte + ask.UnsealPrice = expectedQR.UnsealPrice + return ask, nil + } + + // Create a DAG store wrapper + dagstoreWrapper := tut.NewMockDagStoreWrapper(pieceStore, sectorAccessor) + dagstoreWrapper.AddBlockToPieceIndex(payloadCID, pieceInfo.PieceCID) + + // Register the piece with the DAG store wrapper + err = stores.RegisterShardSync(ctx, dagstoreWrapper, pieceInfo.PieceCID, carFilePath, true) + require.NoError(t, err) + + // Remove the CAR file so that the provider is forced to unseal the data + // (instead of using the cached CAR file) + _ = os.Remove(carFilePath) + + provider, err := retrievalimpl.NewProvider(providerPaymentAddr, providerNode, sectorAccessor, + nw2, 
pieceStore, dagstoreWrapper, dt2, providerDs, priceFunc, opts...) + require.NoError(t, err) + + return provider +} + +type pmtChan struct { + client, miner address.Address + amt abi.TokenAmount +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/lazyblockstore.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/lazyblockstore.go new file mode 100644 index 00000000000..ee13dc1c6ff --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/lazyblockstore.go @@ -0,0 +1,94 @@ +package retrievalimpl + +import ( + "context" + "sync" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + + "github.com/filecoin-project/dagstore" +) + +// lazyBlockstore is a read-only wrapper around a Blockstore that is loaded +// lazily when one of its methods are called +type lazyBlockstore struct { + lk sync.Mutex + bs dagstore.ReadBlockstore + load func() (dagstore.ReadBlockstore, error) +} + +func newLazyBlockstore(load func() (dagstore.ReadBlockstore, error)) *lazyBlockstore { + return &lazyBlockstore{ + load: load, + } +} + +func (l *lazyBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error { + panic("cannot call DeleteBlock on read-only blockstore") +} + +func (l *lazyBlockstore) Put(ctx context.Context, block blocks.Block) error { + panic("cannot call Put on read-only blockstore") +} + +func (l *lazyBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { + panic("cannot call PutMany on read-only blockstore") +} + +func (l *lazyBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + bs, err := l.init() + if err != nil { + return false, err + } + return bs.Has(ctx, c) +} + +func (l *lazyBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + bs, err := l.init() + if err != nil { + return nil, err + } + return bs.Get(ctx, c) +} + +func (l *lazyBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + bs, 
err := l.init() + if err != nil { + return 0, err + } + return bs.GetSize(ctx, c) +} + +func (l *lazyBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + bs, err := l.init() + if err != nil { + return nil, err + } + return bs.AllKeysChan(ctx) +} + +func (l *lazyBlockstore) HashOnRead(enabled bool) { + bs, err := l.init() + if err != nil { + return + } + bs.HashOnRead(enabled) +} + +func (l *lazyBlockstore) init() (dagstore.ReadBlockstore, error) { + l.lk.Lock() + defer l.lk.Unlock() + + if l.bs == nil { + var err error + l.bs, err = l.load() + if err != nil { + return nil, err + } + } + return l.bs, nil +} + +var _ bstore.Blockstore = (*lazyBlockstore)(nil) diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/lazyblockstore_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/lazyblockstore_test.go new file mode 100644 index 00000000000..1c373a0fa7e --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/lazyblockstore_test.go @@ -0,0 +1,124 @@ +package retrievalimpl + +import ( + "context" + "testing" + + ds "github.com/ipfs/go-datastore" + bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/dagstore" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestLazyBlockstoreGet(t *testing.T) { + ctx := context.TODO() + b := shared_testutil.GenerateBlocksOfSize(1, 1024)[0] + + ds := ds.NewMapDatastore() + bs := bstore.NewBlockstore(ds) + err := bs.Put(ctx, b) + require.NoError(t, err) + + lbs := newLazyBlockstore(func() (dagstore.ReadBlockstore, error) { + return bs, nil + }) + + blk, err := lbs.Get(ctx, b.Cid()) + require.NoError(t, err) + require.Equal(t, b, blk) +} + +func TestLazyBlockstoreAllKeysChan(t *testing.T) { + ctx := context.TODO() + blks := shared_testutil.GenerateBlocksOfSize(2, 1024) + + ds := ds.NewMapDatastore() + bs := bstore.NewBlockstore(ds) + + for _, b := range blks { + err := 
bs.Put(ctx, b) + require.NoError(t, err) + } + + lbs := newLazyBlockstore(func() (dagstore.ReadBlockstore, error) { + return bs, nil + }) + + ch, err := lbs.AllKeysChan(context.Background()) + require.NoError(t, err) + + var count int + for k := range ch { + count++ + has, err := bs.Has(ctx, k) + require.NoError(t, err) + require.True(t, has) + } + require.Len(t, blks, count) +} + +func TestLazyBlockstoreHas(t *testing.T) { + ctx := context.TODO() + b := shared_testutil.GenerateBlocksOfSize(1, 1024)[0] + + ds := ds.NewMapDatastore() + bs := bstore.NewBlockstore(ds) + err := bs.Put(ctx, b) + require.NoError(t, err) + + lbs := newLazyBlockstore(func() (dagstore.ReadBlockstore, error) { + return bs, nil + }) + + has, err := lbs.Has(ctx, b.Cid()) + require.NoError(t, err) + require.True(t, has) +} + +func TestLazyBlockstoreGetSize(t *testing.T) { + ctx := context.TODO() + b := shared_testutil.GenerateBlocksOfSize(1, 1024)[0] + + ds := ds.NewMapDatastore() + bs := bstore.NewBlockstore(ds) + err := bs.Put(ctx, b) + require.NoError(t, err) + + lbs := newLazyBlockstore(func() (dagstore.ReadBlockstore, error) { + return bs, nil + }) + + sz, err := lbs.GetSize(ctx, b.Cid()) + require.NoError(t, err) + require.Equal(t, 1024, sz) +} + +func TestLazyBlockstoreMultipleInvocations(t *testing.T) { + ctx := context.TODO() + b := shared_testutil.GenerateBlocksOfSize(1, 1024)[0] + + ds := ds.NewMapDatastore() + bs := bstore.NewBlockstore(ds) + err := bs.Put(ctx, b) + require.NoError(t, err) + + // Count the number of times that the init function is invoked + var invokedCount int + lbs := newLazyBlockstore(func() (dagstore.ReadBlockstore, error) { + invokedCount++ + return bs, nil + }) + + // Invoke Get twice + _, err = lbs.Get(ctx, b.Cid()) + require.NoError(t, err) + + _, err = lbs.Get(ctx, b.Cid()) + require.NoError(t, err) + + // Verify that the init function is only invoked once + require.Equal(t, 1, invokedCount) +} diff --git 
a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider.go new file mode 100644 index 00000000000..1042abc2e4c --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider.go @@ -0,0 +1,585 @@ +package retrievalimpl + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/hannahhoward/go-pubsub" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versionedfsm "github.com/filecoin-project/go-ds-versioning/pkg/fsm" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/askstore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/dtutils" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/stores" +) + +// RetrievalProviderOption is a function that configures a retrieval provider +type RetrievalProviderOption func(p *Provider) + +// DealDecider is a function that makes a decision about whether to accept a deal +type DealDecider func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) + +type 
RetrievalPricingFunc func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) + +var queryTimeout = 5 * time.Second + +// Provider is the production implementation of the RetrievalProvider interface +type Provider struct { + dataTransfer datatransfer.Manager + node retrievalmarket.RetrievalProviderNode + sa retrievalmarket.SectorAccessor + network rmnet.RetrievalMarketNetwork + requestValidator *requestvalidation.ProviderRequestValidator + revalidator *requestvalidation.ProviderRevalidator + minerAddress address.Address + pieceStore piecestore.PieceStore + readyMgr *shared.ReadyManager + subscribers *pubsub.PubSub + stateMachines fsm.Group + migrateStateMachines func(context.Context) error + dealDecider DealDecider + askStore retrievalmarket.AskStore + disableNewDeals bool + retrievalPricingFunc RetrievalPricingFunc + dagStore stores.DAGStoreWrapper + stores *stores.ReadOnlyBlockstores +} + +type internalProviderEvent struct { + evt retrievalmarket.ProviderEvent + state retrievalmarket.ProviderDealState +} + +func providerDispatcher(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error { + ie, ok := evt.(internalProviderEvent) + if !ok { + return errors.New("wrong type of event") + } + cb, ok := subscriberFn.(retrievalmarket.ProviderSubscriber) + if !ok { + return errors.New("wrong type of event") + } + log.Debugw("process retrieval provider] listeners", "name", retrievalmarket.ProviderEvents[ie.evt], "proposal cid", ie.state.ID) + cb(ie.evt, ie.state) + return nil +} + +var _ retrievalmarket.RetrievalProvider = new(Provider) + +// DealDeciderOpt sets a custom protocol +func DealDeciderOpt(dd DealDecider) RetrievalProviderOption { + return func(provider *Provider) { + provider.dealDecider = dd + } +} + +// DisableNewDeals disables setup for v1 deal protocols +func DisableNewDeals() RetrievalProviderOption { + return func(provider *Provider) { + provider.disableNewDeals = true + } +} + +// NewProvider returns a new 
retrieval Provider +func NewProvider(minerAddress address.Address, + node retrievalmarket.RetrievalProviderNode, + sa retrievalmarket.SectorAccessor, + network rmnet.RetrievalMarketNetwork, + pieceStore piecestore.PieceStore, + dagStore stores.DAGStoreWrapper, + dataTransfer datatransfer.Manager, + ds datastore.Batching, + retrievalPricingFunc RetrievalPricingFunc, + opts ...RetrievalProviderOption, +) (retrievalmarket.RetrievalProvider, error) { + + if retrievalPricingFunc == nil { + return nil, xerrors.New("retrievalPricingFunc is nil") + } + + p := &Provider{ + dataTransfer: dataTransfer, + node: node, + sa: sa, + network: network, + minerAddress: minerAddress, + pieceStore: pieceStore, + subscribers: pubsub.New(providerDispatcher), + readyMgr: shared.NewReadyManager(), + retrievalPricingFunc: retrievalPricingFunc, + dagStore: dagStore, + stores: stores.NewReadOnlyBlockstores(), + } + + err := shared.MoveKey(ds, "retrieval-ask", "retrieval-ask/latest") + if err != nil { + return nil, err + } + + askStore, err := askstore.NewAskStore(namespace.Wrap(ds, datastore.NewKey("retrieval-ask")), datastore.NewKey("latest")) + if err != nil { + return nil, err + } + p.askStore = askStore + + retrievalMigrations, err := migrations.ProviderMigrations.Build() + if err != nil { + return nil, err + } + p.stateMachines, p.migrateStateMachines, err = versionedfsm.NewVersionedFSM(ds, fsm.Parameters{ + Environment: &providerDealEnvironment{p}, + StateType: retrievalmarket.ProviderDealState{}, + StateKeyField: "Status", + Events: providerstates.ProviderEvents, + StateEntryFuncs: providerstates.ProviderStateEntryFuncs, + FinalityStates: providerstates.ProviderFinalityStates, + Notifier: p.notifySubscribers, + }, retrievalMigrations, versioning.VersionKey("1")) + if err != nil { + return nil, err + } + p.Configure(opts...) 
+ p.requestValidator = requestvalidation.NewProviderRequestValidator(&providerValidationEnvironment{p}) + transportConfigurer := dtutils.TransportConfigurer(network.ID(), &providerStoreGetter{p}) + p.revalidator = requestvalidation.NewProviderRevalidator(&providerRevalidatorEnvironment{p}) + + if p.disableNewDeals { + err = p.dataTransfer.RegisterVoucherType(&migrations.DealProposal0{}, p.requestValidator) + if err != nil { + return nil, err + } + err = p.dataTransfer.RegisterRevalidator(&migrations.DealPayment0{}, p.revalidator) + if err != nil { + return nil, err + } + } else { + err = p.dataTransfer.RegisterVoucherType(&retrievalmarket.DealProposal{}, p.requestValidator) + if err != nil { + return nil, err + } + err = p.dataTransfer.RegisterVoucherType(&migrations.DealProposal0{}, p.requestValidator) + if err != nil { + return nil, err + } + + err = p.dataTransfer.RegisterRevalidator(&retrievalmarket.DealPayment{}, p.revalidator) + if err != nil { + return nil, err + } + err = p.dataTransfer.RegisterRevalidator(&migrations.DealPayment0{}, requestvalidation.NewLegacyRevalidator(p.revalidator)) + if err != nil { + return nil, err + } + + err = p.dataTransfer.RegisterVoucherResultType(&retrievalmarket.DealResponse{}) + if err != nil { + return nil, err + } + + err = p.dataTransfer.RegisterTransportConfigurer(&retrievalmarket.DealProposal{}, transportConfigurer) + if err != nil { + return nil, err + } + } + err = p.dataTransfer.RegisterVoucherResultType(&migrations.DealResponse0{}) + if err != nil { + return nil, err + } + err = p.dataTransfer.RegisterTransportConfigurer(&migrations.DealProposal0{}, transportConfigurer) + if err != nil { + return nil, err + } + dataTransfer.SubscribeToEvents(dtutils.ProviderDataTransferSubscriber(p.stateMachines)) + return p, nil +} + +// Stop stops handling incoming requests. +func (p *Provider) Stop() error { + return p.network.StopHandlingRequests() +} + +// Start begins listening for deals on the given host. 
+// Start must be called in order to accept incoming deals. +func (p *Provider) Start(ctx context.Context) error { + go func() { + err := p.migrateStateMachines(ctx) + if err != nil { + log.Errorf("Migrating retrieval provider state machines: %s", err.Error()) + } + err = p.readyMgr.FireReady(err) + if err != nil { + log.Warnf("Publish retrieval provider ready event: %s", err.Error()) + } + }() + return p.network.SetDelegate(p) +} + +// OnReady registers a listener for when the provider has finished starting up +func (p *Provider) OnReady(ready shared.ReadyFunc) { + p.readyMgr.OnReady(ready) +} + +func (p *Provider) notifySubscribers(eventName fsm.EventName, state fsm.StateType) { + evt := eventName.(retrievalmarket.ProviderEvent) + ds := state.(retrievalmarket.ProviderDealState) + _ = p.subscribers.Publish(internalProviderEvent{evt, ds}) +} + +// SubscribeToEvents listens for events that happen related to client retrievals +func (p *Provider) SubscribeToEvents(subscriber retrievalmarket.ProviderSubscriber) retrievalmarket.Unsubscribe { + return retrievalmarket.Unsubscribe(p.subscribers.Subscribe(subscriber)) +} + +// GetAsk returns the current deal parameters this provider accepts +func (p *Provider) GetAsk() *retrievalmarket.Ask { + return p.askStore.GetAsk() +} + +// SetAsk sets the deal parameters this provider accepts +func (p *Provider) SetAsk(ask *retrievalmarket.Ask) { + + err := p.askStore.SetAsk(ask) + + if err != nil { + log.Warnf("Error setting retrieval ask: %w", err) + } +} + +// ListDeals lists all known retrieval deals +func (p *Provider) ListDeals() map[retrievalmarket.ProviderDealIdentifier]retrievalmarket.ProviderDealState { + var deals []retrievalmarket.ProviderDealState + _ = p.stateMachines.List(&deals) + dealMap := make(map[retrievalmarket.ProviderDealIdentifier]retrievalmarket.ProviderDealState) + for _, deal := range deals { + dealMap[retrievalmarket.ProviderDealIdentifier{Receiver: deal.Receiver, DealID: deal.ID}] = deal + } + return 
dealMap +} + +/* +HandleQueryStream is called by the network implementation whenever a new message is received on the query protocol + +A Provider handling a retrieval `Query` does the following: + +1. Get the node's chain head in order to get its miner worker address. + +2. Look in its piece store to determine if it can serve the given payload CID. + +3. Combine these results with its existing parameters for retrieval deals to construct a `retrievalmarket.QueryResponse` struct. + +4. Writes this response to the `Query` stream. + +The connection is kept open only as long as the query-response exchange. +*/ +func (p *Provider) HandleQueryStream(stream rmnet.RetrievalQueryStream) { + ctx, cancel := context.WithTimeout(context.TODO(), queryTimeout) + defer cancel() + + defer stream.Close() + query, err := stream.ReadQuery() + if err != nil { + return + } + + sendResp := func(resp retrievalmarket.QueryResponse) { + if err := stream.WriteQueryResponse(resp); err != nil { + log.Errorf("Retrieval query: writing query response: %s", err) + } + } + + answer := retrievalmarket.QueryResponse{ + Status: retrievalmarket.QueryResponseUnavailable, + PieceCIDFound: retrievalmarket.QueryItemUnavailable, + MinPricePerByte: big.Zero(), + UnsealPrice: big.Zero(), + } + + // get chain head to query actor states. + tok, _, err := p.node.GetChainHead(ctx) + if err != nil { + log.Errorf("Retrieval query: GetChainHead: %s", err) + return + } + + // fetch the payment address the client should send the payment to. + paymentAddress, err := p.node.GetMinerWorkerAddress(ctx, p.minerAddress, tok) + if err != nil { + log.Errorf("Retrieval query: Lookup Payment Address: %s", err) + answer.Status = retrievalmarket.QueryResponseError + answer.Message = fmt.Sprintf("failed to look up payment address: %s", err) + sendResp(answer) + return + } + answer.PaymentAddress = paymentAddress + + // fetch the piece from which the payload will be retrieved. 
+ // if user has specified the Piece in the request, we use that. + // Otherwise, we prefer a Piece which can retrieved from an unsealed sector. + pieceCID := cid.Undef + if query.PieceCID != nil { + pieceCID = *query.PieceCID + } + pieceInfo, isUnsealed, err := p.getPieceInfoFromCid(ctx, query.PayloadCID, pieceCID) + if err != nil { + log.Errorf("Retrieval query: getPieceInfoFromCid: %s", err) + if !xerrors.Is(err, retrievalmarket.ErrNotFound) { + answer.Status = retrievalmarket.QueryResponseError + answer.Message = fmt.Sprintf("failed to fetch piece to retrieve from: %s", err) + } else { + answer.Message = "piece info for cid not found (deal has not been added to a piece yet)" + } + + sendResp(answer) + return + } + + answer.Status = retrievalmarket.QueryResponseAvailable + answer.Size = uint64(pieceInfo.Deals[0].Length.Unpadded()) // TODO: verify on intermediate + answer.PieceCIDFound = retrievalmarket.QueryItemAvailable + + storageDeals, err := p.storageDealsForPiece(query.PieceCID != nil, query.PayloadCID, pieceInfo) + if err != nil { + log.Errorf("Retrieval query: storageDealsForPiece: %s", err) + answer.Status = retrievalmarket.QueryResponseError + answer.Message = fmt.Sprintf("failed to fetch storage deals containing payload: %s", err) + sendResp(answer) + return + } + + input := retrievalmarket.PricingInput{ + // piece from which the payload will be retrieved + // If user hasn't given a PieceCID, we try to choose an unsealed piece in the call to `getPieceInfoFromCid` above. 
+ PieceCID: pieceInfo.PieceCID, + + PayloadCID: query.PayloadCID, + Unsealed: isUnsealed, + Client: stream.RemotePeer(), + } + ask, err := p.GetDynamicAsk(ctx, input, storageDeals) + if err != nil { + log.Errorf("Retrieval query: GetAsk: %s", err) + answer.Status = retrievalmarket.QueryResponseError + answer.Message = fmt.Sprintf("failed to price deal: %s", err) + sendResp(answer) + return + } + + answer.MinPricePerByte = ask.PricePerByte + answer.MaxPaymentInterval = ask.PaymentInterval + answer.MaxPaymentIntervalIncrease = ask.PaymentIntervalIncrease + answer.UnsealPrice = ask.UnsealPrice + sendResp(answer) +} + +// Given the CID of a block, find a piece that contains that block. +// If the client has specified which piece they want, return that piece. +// Otherwise prefer pieces that are already unsealed. +func (p *Provider) getPieceInfoFromCid(ctx context.Context, payloadCID, clientPieceCID cid.Cid) (piecestore.PieceInfo, bool, error) { + // Get all pieces that contain the target block + piecesWithTargetBlock, err := p.dagStore.GetPiecesContainingBlock(payloadCID) + if err != nil { + return piecestore.PieceInfoUndefined, false, xerrors.Errorf("getting pieces for cid %s: %w", payloadCID, err) + } + + // For each piece that contains the target block + var lastErr error + var sealedPieceInfo *piecestore.PieceInfo + for _, pieceWithTargetBlock := range piecesWithTargetBlock { + // Get the deals for the piece + pieceInfo, err := p.pieceStore.GetPieceInfo(pieceWithTargetBlock) + if err != nil { + lastErr = err + continue + } + + // if client wants to retrieve the payload from a specific piece, just return that piece. + if clientPieceCID.Defined() && pieceInfo.PieceCID.Equals(clientPieceCID) { + return pieceInfo, p.pieceInUnsealedSector(ctx, pieceInfo), nil + } + + // if client doesn't have a preference for a particular piece, prefer a piece + // for which an unsealed sector exists. 
+ if clientPieceCID.Equals(cid.Undef) { + if p.pieceInUnsealedSector(ctx, pieceInfo) { + // The piece is in an unsealed sector, so just return it + return pieceInfo, true, nil + } + + if sealedPieceInfo == nil { + // The piece is not in an unsealed sector, so save it but keep + // checking other pieces to see if there is one that is in an + // unsealed sector + sealedPieceInfo = &pieceInfo + } + } + + } + + // Found a piece containing the target block, piece is in a sealed sector + if sealedPieceInfo != nil { + return *sealedPieceInfo, false, nil + } + + // Couldn't find a piece containing the target block + if lastErr == nil { + lastErr = xerrors.Errorf("unknown pieceCID %s", clientPieceCID.String()) + } + + // Error finding a piece containing the target block + return piecestore.PieceInfoUndefined, false, xerrors.Errorf("could not locate piece: %w", lastErr) +} + +func (p *Provider) pieceInUnsealedSector(ctx context.Context, pieceInfo piecestore.PieceInfo) bool { + for _, di := range pieceInfo.Deals { + isUnsealed, err := p.sa.IsUnsealed(ctx, di.SectorID, di.Offset.Unpadded(), di.Length.Unpadded()) + if err != nil { + log.Errorf("failed to find out if sector %d is unsealed, err=%s", di.SectorID, err) + continue + } + if isUnsealed { + return true + } + } + + return false +} + +func (p *Provider) storageDealsForPiece(clientSpecificPiece bool, payloadCID cid.Cid, pieceInfo piecestore.PieceInfo) ([]abi.DealID, error) { + var storageDeals []abi.DealID + var err error + if clientSpecificPiece { + // If the user wants to retrieve the payload from a specific piece, + // we only need to inspect storage deals made for that piece to quote a price. + for _, d := range pieceInfo.Deals { + storageDeals = append(storageDeals, d.DealID) + } + } else { + // If the user does NOT want to retrieve from a specific piece, we'll have to inspect all storage deals + // made for that piece to quote a price. 
+ storageDeals, err = p.getAllDealsContainingPayload(payloadCID) + if err != nil { + return nil, xerrors.Errorf("failed to fetch deals for payload: %w", err) + } + } + + if len(storageDeals) == 0 { + return nil, xerrors.New("no storage deals found") + } + + return storageDeals, nil +} + +func (p *Provider) getAllDealsContainingPayload(payloadCID cid.Cid) ([]abi.DealID, error) { + // Get all pieces that contain the target block + piecesWithTargetBlock, err := p.dagStore.GetPiecesContainingBlock(payloadCID) + if err != nil { + return nil, xerrors.Errorf("getting pieces for cid %s: %w", payloadCID, err) + } + + // For each piece that contains the target block + var lastErr error + var dealsIds []abi.DealID + for _, pieceWithTargetBlock := range piecesWithTargetBlock { + // Get the deals for the piece + pieceInfo, err := p.pieceStore.GetPieceInfo(pieceWithTargetBlock) + if err != nil { + lastErr = err + continue + } + + for _, d := range pieceInfo.Deals { + dealsIds = append(dealsIds, d.DealID) + } + } + + if lastErr == nil && len(dealsIds) == 0 { + return nil, xerrors.New("no deals found") + } + + if lastErr != nil && len(dealsIds) == 0 { + return nil, xerrors.Errorf("failed to fetch deals containing payload %s: %w", payloadCID, lastErr) + } + + return dealsIds, nil +} + +// GetDynamicAsk quotes a dynamic price for the retrieval deal by calling the user configured +// dynamic pricing function. It passes the static price parameters set in the Ask Store to the pricing function. +func (p *Provider) GetDynamicAsk(ctx context.Context, input retrievalmarket.PricingInput, storageDeals []abi.DealID) (retrievalmarket.Ask, error) { + dp, err := p.node.GetRetrievalPricingInput(ctx, input.PieceCID, storageDeals) + if err != nil { + return retrievalmarket.Ask{}, xerrors.Errorf("GetRetrievalPricingInput: %s", err) + } + // currAsk cannot be nil as we initialize the ask store with a default ask. + // Users can then change the values in the ask store using SetAsk but not remove it. 
+ currAsk := p.GetAsk() + if currAsk == nil { + return retrievalmarket.Ask{}, xerrors.New("no ask configured in ask-store") + } + + dp.PayloadCID = input.PayloadCID + dp.PieceCID = input.PieceCID + dp.Unsealed = input.Unsealed + dp.Client = input.Client + dp.CurrentAsk = *currAsk + + ask, err := p.retrievalPricingFunc(ctx, dp) + if err != nil { + return retrievalmarket.Ask{}, xerrors.Errorf("retrievalPricingFunc: %w", err) + } + return ask, nil +} + +// Configure reconfigures a provider after initialization +func (p *Provider) Configure(opts ...RetrievalProviderOption) { + for _, opt := range opts { + opt(p) + } +} + +// ProviderFSMParameterSpec is a valid set of parameters for a provider FSM - used in doc generation +var ProviderFSMParameterSpec = fsm.Parameters{ + Environment: &providerDealEnvironment{}, + StateType: retrievalmarket.ProviderDealState{}, + StateKeyField: "Status", + Events: providerstates.ProviderEvents, + StateEntryFuncs: providerstates.ProviderStateEntryFuncs, +} + +// DefaultPricingFunc is the default pricing policy that will be used to price retrieval deals. +var DefaultPricingFunc = func(VerifiedDealsFreeTransfer bool) func(ctx context.Context, pricingInput retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + return func(ctx context.Context, pricingInput retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := pricingInput.CurrentAsk + + // don't charge for Unsealing if we have an Unsealed copy. + if pricingInput.Unsealed { + ask.UnsealPrice = big.Zero() + } + + // don't charge for data transfer for verified deals if it's been configured to do so. 
+ if pricingInput.VerifiedDeal && VerifiedDealsFreeTransfer { + ask.PricePerByte = big.Zero() + } + + return ask, nil + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider_environments.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider_environments.go new file mode 100644 index 00000000000..cbb46b46e84 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider_environments.go @@ -0,0 +1,208 @@ +package retrievalimpl + +import ( + "context" + "errors" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" + + "github.com/filecoin-project/dagstore" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/dtutils" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/shared" +) + +var _ requestvalidation.ValidationEnvironment = new(providerValidationEnvironment) + +type providerValidationEnvironment struct { + p *Provider +} + +func (pve *providerValidationEnvironment) GetAsk(ctx context.Context, payloadCid cid.Cid, pieceCid *cid.Cid, + piece piecestore.PieceInfo, isUnsealed bool, client peer.ID) (retrievalmarket.Ask, error) { + + storageDeals, err := pve.p.storageDealsForPiece(pieceCid != nil, payloadCid, piece) + if err != nil { + return retrievalmarket.Ask{}, xerrors.Errorf("failed to fetch deals for payload: %w", err) + } + + input := retrievalmarket.PricingInput{ + // piece from which the payload will be retrieved + PieceCID: piece.PieceCID, + + 
PayloadCID: payloadCid, + Unsealed: isUnsealed, + Client: client, + } + + return pve.p.GetDynamicAsk(ctx, input, storageDeals) +} + +func (pve *providerValidationEnvironment) GetPiece(c cid.Cid, pieceCID *cid.Cid) (piecestore.PieceInfo, bool, error) { + inPieceCid := cid.Undef + if pieceCID != nil { + inPieceCid = *pieceCID + } + + return pve.p.getPieceInfoFromCid(context.TODO(), c, inPieceCid) +} + +// CheckDealParams verifies the given deal params are acceptable +func (pve *providerValidationEnvironment) CheckDealParams(ask retrievalmarket.Ask, pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64, unsealPrice abi.TokenAmount) error { + if pricePerByte.LessThan(ask.PricePerByte) { + return errors.New("Price per byte too low") + } + if paymentInterval > ask.PaymentInterval { + return errors.New("Payment interval too large") + } + if paymentIntervalIncrease > ask.PaymentIntervalIncrease { + return errors.New("Payment interval increase too large") + } + if !ask.UnsealPrice.Nil() && unsealPrice.LessThan(ask.UnsealPrice) { + return errors.New("Unseal price too small") + } + return nil +} + +// RunDealDecisioningLogic runs custom deal decision logic to decide if a deal is accepted, if present +func (pve *providerValidationEnvironment) RunDealDecisioningLogic(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { + if pve.p.dealDecider == nil { + return true, "", nil + } + return pve.p.dealDecider(ctx, state) +} + +// StateMachines returns the FSM Group to begin tracking with +func (pve *providerValidationEnvironment) BeginTracking(pds retrievalmarket.ProviderDealState) error { + err := pve.p.stateMachines.Begin(pds.Identifier(), &pds) + if err != nil { + return err + } + + if pds.UnsealPrice.GreaterThan(big.Zero()) { + return pve.p.stateMachines.Send(pds.Identifier(), retrievalmarket.ProviderEventPaymentRequested, uint64(0)) + } + + return pve.p.stateMachines.Send(pds.Identifier(), 
retrievalmarket.ProviderEventOpen) +} + +type providerRevalidatorEnvironment struct { + p *Provider +} + +func (pre *providerRevalidatorEnvironment) Node() retrievalmarket.RetrievalProviderNode { + return pre.p.node +} + +func (pre *providerRevalidatorEnvironment) SendEvent(dealID retrievalmarket.ProviderDealIdentifier, evt retrievalmarket.ProviderEvent, args ...interface{}) error { + return pre.p.stateMachines.Send(dealID, evt, args...) +} + +func (pre *providerRevalidatorEnvironment) Get(dealID retrievalmarket.ProviderDealIdentifier) (retrievalmarket.ProviderDealState, error) { + var deal retrievalmarket.ProviderDealState + err := pre.p.stateMachines.GetSync(context.TODO(), dealID, &deal) + return deal, err +} + +var _ providerstates.ProviderDealEnvironment = new(providerDealEnvironment) + +type providerDealEnvironment struct { + p *Provider +} + +// Node returns the node interface for this deal +func (pde *providerDealEnvironment) Node() retrievalmarket.RetrievalProviderNode { + return pde.p.node +} + +// PrepareBlockstore is called when the deal data has been unsealed and we need +// to add all blocks to a blockstore that is used to serve retrieval +func (pde *providerDealEnvironment) PrepareBlockstore(ctx context.Context, dealID retrievalmarket.DealID, pieceCid cid.Cid) error { + // Load the blockstore that has the deal data + bs, err := pde.p.dagStore.LoadShard(ctx, pieceCid) + if err != nil { + return xerrors.Errorf("failed to load blockstore for piece %s: %w", pieceCid, err) + } + + log.Debugf("adding blockstore for deal %d to tracker", dealID) + _, err = pde.p.stores.Track(dealID.String(), bs) + log.Debugf("added blockstore for deal %d to tracker", dealID) + return err +} + +func (pde *providerDealEnvironment) TrackTransfer(deal retrievalmarket.ProviderDealState) error { + pde.p.revalidator.TrackChannel(deal) + return nil +} + +func (pde *providerDealEnvironment) UntrackTransfer(deal retrievalmarket.ProviderDealState) error { + 
pde.p.revalidator.UntrackChannel(deal) + return nil +} + +func (pde *providerDealEnvironment) ResumeDataTransfer(ctx context.Context, chid datatransfer.ChannelID) error { + return pde.p.dataTransfer.ResumeDataTransferChannel(ctx, chid) +} + +func (pde *providerDealEnvironment) CloseDataTransfer(ctx context.Context, chid datatransfer.ChannelID) error { + // When we close the data transfer, we also send a cancel message to the peer. + // Make sure we don't wait too long to send the message. + ctx, cancel := context.WithTimeout(ctx, shared.CloseDataTransferTimeout) + defer cancel() + + err := pde.p.dataTransfer.CloseDataTransferChannel(ctx, chid) + if shared.IsCtxDone(err) { + log.Warnf("failed to send cancel data transfer channel %s to client within timeout %s", + chid, shared.CloseDataTransferTimeout) + return nil + } + return err +} + +func (pde *providerDealEnvironment) DeleteStore(dealID retrievalmarket.DealID) error { + // close the read-only blockstore and stop tracking it for the deal + if err := pde.p.stores.Untrack(dealID.String()); err != nil { + return xerrors.Errorf("failed to clean read-only blockstore for deal %d: %w", dealID, err) + } + + return nil +} + +var _ dtutils.StoreGetter = &providerStoreGetter{} + +type providerStoreGetter struct { + p *Provider +} + +func (psg *providerStoreGetter) Get(otherPeer peer.ID, dealID retrievalmarket.DealID) (bstore.Blockstore, error) { + var deal retrievalmarket.ProviderDealState + provDealID := retrievalmarket.ProviderDealIdentifier{Receiver: otherPeer, DealID: dealID} + err := psg.p.stateMachines.Get(provDealID).Get(&deal) + if err != nil { + return nil, xerrors.Errorf("failed to get deal state: %w", err) + } + + // + // When a request for data is received + // 1. The data transfer layer calls Get to get the blockstore + // 2. The data for the deal is unsealed + // 3. The unsealed data is put into the blockstore (in this case a CAR file) + // 4. 
The data is served from the blockstore (using blockstore.Get) + // + // So we use a "lazy" blockstore that can be returned in step 1 + // but is only accessed in step 4 after the data has been unsealed. + // + return newLazyBlockstore(func() (dagstore.ReadBlockstore, error) { + return psg.p.stores.Get(dealID.String()) + }), nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider_test.go new file mode 100644 index 00000000000..bac7756f2c0 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/provider_test.go @@ -0,0 +1,1243 @@ +package retrievalimpl_test + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + dss "github.com/ipfs/go-datastore/sync" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/piecestore" + piecemigrations "github.com/filecoin-project/go-fil-markets/piecestore/migrations" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestDynamicPricing(t *testing.T) { + ctx := context.Background() + expectedAddress := address.TestAddress2 + + payloadCID := tut.GenerateCids(1)[0] + peer1 := peer.ID("peer1") + peer2 := peer.ID("peer2") + + // differential price per byte + expectedppbUnVerified := abi.NewTokenAmount(4321) + expectedppbVerified := abi.NewTokenAmount(2) + + // differential sealing/unsealing price + expectedUnsealPrice := abi.NewTokenAmount(100) + expectedUnsealDiscount := abi.NewTokenAmount(1) + + // differential payment interval + expectedpiPeer1 := uint64(4567) + expectedpiPeer2 := uint64(20) + + expectedPaymentIntervalIncrease := uint64(100) + + // multiple pieces have the same payload + expectedPieceCID1 := tut.GenerateCids(1)[0] + expectedPieceCID2 := tut.GenerateCids(1)[0] + + // sizes + piece1SizePadded := uint64(1234) + piece1Size := uint64(abi.PaddedPieceSize(piece1SizePadded).Unpadded()) + + piece2SizePadded := uint64(2234) + piece2Size := uint64(abi.PaddedPieceSize(piece2SizePadded).Unpadded()) + + piece1 := piecestore.PieceInfo{ + PieceCID: expectedPieceCID1, + Deals: []piecestore.DealInfo{ + { + DealID: abi.DealID(1), + Length: abi.PaddedPieceSize(piece1SizePadded), + }, + { + DealID: abi.DealID(11), + Length: abi.PaddedPieceSize(piece1SizePadded), + }, + }, + } + + piece2 := piecestore.PieceInfo{ + PieceCID: expectedPieceCID2, + Deals: []piecestore.DealInfo{ + { + DealID: abi.DealID(2), + Length: abi.PaddedPieceSize(piece2SizePadded), + }, + { + DealID: abi.DealID(22), + Length: abi.PaddedPieceSize(piece2SizePadded), + }, + { + DealID: abi.DealID(222), + Length: abi.PaddedPieceSize(piece2SizePadded), + }, + }, + } + + dPriceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := retrievalmarket.Ask{} + + if dealPricingParams.VerifiedDeal { + ask.PricePerByte = 
expectedppbVerified + } else { + ask.PricePerByte = expectedppbUnVerified + } + + if dealPricingParams.Unsealed { + ask.UnsealPrice = expectedUnsealDiscount + } else { + ask.UnsealPrice = expectedUnsealPrice + } + + fmt.Println("\n client is", dealPricingParams.Client.String()) + if dealPricingParams.Client == peer2 { + ask.PaymentInterval = expectedpiPeer2 + } else { + ask.PaymentInterval = expectedpiPeer1 + } + ask.PaymentIntervalIncrease = expectedPaymentIntervalIncrease + + return ask, nil + } + + buildProvider := func( + t *testing.T, + node *testnodes.TestRetrievalProviderNode, + sa retrievalmarket.SectorAccessor, + qs network.RetrievalQueryStream, + pieceStore piecestore.PieceStore, + dagStore *tut.MockDagStoreWrapper, + net *tut.TestRetrievalMarketNetwork, + pFnc retrievalimpl.RetrievalPricingFunc, + ) retrievalmarket.RetrievalProvider { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + dt := tut.NewTestDataTransfer() + c, err := retrievalimpl.NewProvider(expectedAddress, node, sa, net, pieceStore, dagStore, dt, ds, pFnc) + require.NoError(t, err) + tut.StartAndWaitForReady(ctx, t, c) + return c + } + + readWriteQueryStream := func() *tut.TestRetrievalQueryStream { + qRead, qWrite := tut.QueryReadWriter() + qrRead, qrWrite := tut.QueryResponseReadWriter() + qs := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ + Reader: qRead, + Writer: qWrite, + RespReader: qrRead, + RespWriter: qrWrite, + }) + return qs + } + + tcs := map[string]struct { + query retrievalmarket.Query + expFunc func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) + nodeFunc func(n *testnodes.TestRetrievalProviderNode) + sectorAccessorFunc func(sa *testnodes.TestSectorAccessor) + peerIdFnc func(stream *tut.TestRetrievalQueryStream) + providerFnc func(provider retrievalmarket.RetrievalProvider) + + pricingFnc retrievalimpl.RetrievalPricingFunc + + expectedPricePerByte abi.TokenAmount + expectedPaymentInterval uint64 + 
expectedPaymentIntervalIncrease uint64 + expectedUnsealPrice abi.TokenAmount + expectedSize uint64 + }{ + // Retrieval request for a payloadCid without a pieceCid + "pieceCid no-op: quote correct price for sealed, unverified, peer1": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11, 2, 22, 222}) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbUnVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealPrice, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + + "pieceCid no-op: quote correct price for sealed, unverified, peer2": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer2) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11, 2, 22, 222}) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: 
expectedppbUnVerified, + expectedPaymentInterval: expectedpiPeer2, + expectedUnsealPrice: expectedUnsealPrice, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + + "pieceCid no-op: quote correct price for sealed, verified, peer1": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.MarkVerified() + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11, 2, 22, 222}) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealPrice, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + + "pieceCid no-op: quote correct price for unsealed, unverified, peer1": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.ExpectPricingParams(expectedPieceCID2, []abi.DealID{1, 11, 2, 22, 222}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + //pieceStore.ExpectCID(payloadCID, expectedCIDInfo) + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + 
pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbUnVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealDiscount, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece2Size, + }, + + "pieceCid no-op: quote correct price for unsealed, verified, peer1": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.MarkVerified() + n.ExpectPricingParams(expectedPieceCID2, []abi.DealID{1, 11, 2, 22, 222}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealDiscount, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece2Size, + }, + + "pieceCid no-op: quote correct price for unsealed, verified, peer1 using default pricing policy if data transfer fee set to zero for verified deals": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs 
*tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.MarkVerified() + n.ExpectPricingParams(expectedPieceCID2, []abi.DealID{1, 11, 2, 22, 222}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + + providerFnc: func(provider retrievalmarket.RetrievalProvider) { + ask := provider.GetAsk() + ask.PaymentInterval = expectedpiPeer1 + ask.PaymentIntervalIncrease = expectedPaymentIntervalIncrease + provider.SetAsk(ask) + }, + + pricingFnc: retrievalimpl.DefaultPricingFunc(true), + + expectedPricePerByte: big.Zero(), + expectedUnsealPrice: big.Zero(), + expectedPaymentInterval: expectedpiPeer1, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece2Size, + }, + + "pieceCid no-op: quote correct price for unsealed, verified, peer1 using default pricing policy if data transfer fee not set to zero for verified deals": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.MarkVerified() + n.ExpectPricingParams(expectedPieceCID2, []abi.DealID{1, 11, 2, 22, 222}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + 
pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + + providerFnc: func(provider retrievalmarket.RetrievalProvider) { + ask := provider.GetAsk() + ask.PricePerByte = expectedppbVerified + ask.PaymentInterval = expectedpiPeer1 + ask.PaymentIntervalIncrease = expectedPaymentIntervalIncrease + provider.SetAsk(ask) + }, + + pricingFnc: retrievalimpl.DefaultPricingFunc(false), + + expectedPricePerByte: expectedppbVerified, + expectedUnsealPrice: big.Zero(), + expectedPaymentInterval: expectedpiPeer1, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece2Size, + }, + + "pieceCid no-op: quote correct price for sealed, verified, peer1 using default pricing policy": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.MarkVerified() + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11, 2, 22, 222}) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) { + ask := provider.GetAsk() + ask.PricePerByte = expectedppbVerified + ask.PaymentInterval = expectedpiPeer1 + ask.PaymentIntervalIncrease = expectedPaymentIntervalIncrease + ask.UnsealPrice = expectedUnsealPrice + provider.SetAsk(ask) + }, + pricingFnc: retrievalimpl.DefaultPricingFunc(false), + + expectedPricePerByte: expectedppbVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: 
expectedUnsealPrice, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + + // Retrieval requests for a payloadCid inside a specific piece Cid + "specific sealed piece Cid, first piece Cid matches: quote correct price for sealed, unverified, peer1": { + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID1}, + }, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbUnVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealPrice, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + + "specific sealed piece Cid, second piece Cid matches: quote correct price for sealed, unverified, peer1": { + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID2}, + }, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.ExpectPricingParams(expectedPieceCID2, []abi.DealID{2, 22, 222}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece1.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, 
p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID1, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbUnVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealPrice, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece2Size, + }, + + "specific sealed piece Cid, first piece Cid matches: quote correct price for sealed, verified, peer1": { + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID1}, + }, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11}) + n.MarkVerified() + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealPrice, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + + "specific sealed piece Cid, first piece Cid matches: quote correct price for 
unsealed, verified, peer1": { + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID1}, + }, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.MarkVerified() + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece1.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbVerified, + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: expectedUnsealDiscount, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + + "specific sealed piece Cid, first piece Cid matches: quote correct price for unsealed, verified, peer2": { + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID2}, + }, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer2) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.MarkVerified() + n.ExpectPricingParams(expectedPieceCID2, []abi.DealID{2, 22, 222}) + }, + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := piece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + 
dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) {}, + pricingFnc: dPriceFunc, + + expectedPricePerByte: expectedppbVerified, + expectedPaymentInterval: expectedpiPeer2, + expectedUnsealPrice: expectedUnsealDiscount, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece2Size, + }, + + "pieceCid no-op: quote correct price for sealed, unverified, peer1 based on a pre-existing ask": { + query: retrievalmarket.Query{PayloadCID: payloadCID}, + peerIdFnc: func(qs *tut.TestRetrievalQueryStream) { + qs.SetRemotePeer(peer1) + }, + nodeFunc: func(n *testnodes.TestRetrievalProviderNode) { + n.ExpectPricingParams(expectedPieceCID1, []abi.DealID{1, 11, 2, 22, 222}) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID1, piece1) + pieceStore.ExpectPiece(expectedPieceCID2, piece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID1) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + providerFnc: func(provider retrievalmarket.RetrievalProvider) { + ask := provider.GetAsk() + ask.PricePerByte = expectedppbUnVerified + ask.UnsealPrice = expectedUnsealPrice + provider.SetAsk(ask) + }, + pricingFnc: func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask, _ := dPriceFunc(ctx, dealPricingParams) + ppb := big.Add(ask.PricePerByte, dealPricingParams.CurrentAsk.PricePerByte) + unseal := big.Add(ask.UnsealPrice, dealPricingParams.CurrentAsk.UnsealPrice) + ask.PricePerByte = ppb + ask.UnsealPrice = unseal + return ask, nil + }, + + expectedPricePerByte: big.Mul(expectedppbUnVerified, big.NewInt(2)), + expectedPaymentInterval: expectedpiPeer1, + expectedUnsealPrice: big.Mul(expectedUnsealPrice, big.NewInt(2)), + 
expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedSize: piece1Size, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + node := testnodes.NewTestRetrievalProviderNode() + sectorAccessor := testnodes.NewTestSectorAccessor() + qs := readWriteQueryStream() + tc.peerIdFnc(qs) + + err := qs.WriteQuery(tc.query) + require.NoError(t, err) + pieceStore := tut.NewTestPieceStore() + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sectorAccessor) + tc.nodeFunc(node) + if tc.sectorAccessorFunc != nil { + tc.sectorAccessorFunc(sectorAccessor) + } + tc.expFunc(t, pieceStore, dagStore) + + net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}) + p := buildProvider(t, node, sectorAccessor, qs, pieceStore, dagStore, net, tc.pricingFnc) + tc.providerFnc(p) + net.ReceiveQueryStream(qs) + + actualResp, err := qs.ReadQueryResponse() + require.NoError(t, err) + pieceStore.VerifyExpectations(t) + node.VerifyExpectations(t) + sectorAccessor.VerifyExpectations(t) + + require.Equal(t, expectedAddress, actualResp.PaymentAddress) + require.Equal(t, tc.expectedPricePerByte, actualResp.MinPricePerByte) + require.Equal(t, tc.expectedUnsealPrice, actualResp.UnsealPrice) + require.Equal(t, tc.expectedPaymentInterval, actualResp.MaxPaymentInterval) + require.Equal(t, tc.expectedPaymentIntervalIncrease, actualResp.MaxPaymentIntervalIncrease) + require.Equal(t, tc.expectedSize, actualResp.Size) + }) + } +} + +func TestHandleQueryStream(t *testing.T) { + ctx := context.Background() + + payloadCID := tut.GenerateCids(1)[0] + expectedPeer := peer.ID("somepeer") + paddedSize := uint64(1234) + expectedSize := uint64(abi.PaddedPieceSize(paddedSize).Unpadded()) + + paddedSize2 := uint64(2234) + expectedSize2 := uint64(abi.PaddedPieceSize(paddedSize2).Unpadded()) + + expectedPieceCID := tut.GenerateCids(1)[0] + expectedPieceCID2 := tut.GenerateCids(1)[0] + + expectedPiece := piecestore.PieceInfo{ + PieceCID: expectedPieceCID, + Deals: 
[]piecestore.DealInfo{ + { + Length: abi.PaddedPieceSize(paddedSize), + }, + }, + } + + expectedPiece2 := piecestore.PieceInfo{ + PieceCID: expectedPieceCID2, + Deals: []piecestore.DealInfo{ + { + Length: abi.PaddedPieceSize(paddedSize2), + }, + }, + } + + expectedAddress := address.TestAddress2 + expectedPricePerByte := abi.NewTokenAmount(4321) + expectedPaymentInterval := uint64(4567) + expectedPaymentIntervalIncrease := uint64(100) + expectedUnsealPrice := abi.NewTokenAmount(100) + + // differential pricing + expectedUnsealDiscount := abi.NewTokenAmount(1) + + readWriteQueryStream := func() network.RetrievalQueryStream { + qRead, qWrite := tut.QueryReadWriter() + qrRead, qrWrite := tut.QueryResponseReadWriter() + qs := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ + PeerID: expectedPeer, + Reader: qRead, + Writer: qWrite, + RespReader: qrRead, + RespWriter: qrWrite, + }) + return qs + } + + receiveStreamOnProvider := func( + t *testing.T, + node *testnodes.TestRetrievalProviderNode, + sa *testnodes.TestSectorAccessor, + qs network.RetrievalQueryStream, + pieceStore piecestore.PieceStore, + dagStore *tut.MockDagStoreWrapper, + ) { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + dt := tut.NewTestDataTransfer() + net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}) + + priceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := retrievalmarket.Ask{} + ask.PricePerByte = expectedPricePerByte + ask.PaymentInterval = expectedPaymentInterval + ask.PaymentIntervalIncrease = expectedPaymentIntervalIncrease + + if dealPricingParams.Unsealed { + ask.UnsealPrice = expectedUnsealDiscount + } else { + ask.UnsealPrice = expectedUnsealPrice + } + return ask, nil + } + + c, err := retrievalimpl.NewProvider(expectedAddress, node, sa, net, pieceStore, dagStore, dt, ds, priceFunc) + require.NoError(t, err) + + tut.StartAndWaitForReady(ctx, t, c) + + net.ReceiveQueryStream(qs) + } + + 
testCases := []struct { + name string + query retrievalmarket.Query + expResp retrievalmarket.QueryResponse + expErr string + expFunc func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) + sectorAccessorFunc func(sa *testnodes.TestSectorAccessor) + + expectedPricePerByte abi.TokenAmount + expectedPaymentInterval uint64 + expectedPaymentIntervalIncrease uint64 + expectedUnsealPrice abi.TokenAmount + }{ + {name: "When PieceCID is not provided and PayloadCID is found", + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID, expectedPiece) + pieceStore.ExpectPiece(expectedPieceCID2, expectedPiece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + query: retrievalmarket.Query{PayloadCID: payloadCID}, + expResp: retrievalmarket.QueryResponse{ + Status: retrievalmarket.QueryResponseAvailable, + PieceCIDFound: retrievalmarket.QueryItemAvailable, + Size: expectedSize, + }, + expectedPricePerByte: expectedPricePerByte, + expectedPaymentInterval: expectedPaymentInterval, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedUnsealPrice: expectedUnsealPrice, + }, + + {name: "When PieceCID is not provided, prefer a piece for which an unsealed sector already exists and price it accordingly", + sectorAccessorFunc: func(sa *testnodes.TestSectorAccessor) { + p := expectedPiece2.Deals[0] + sa.MarkUnsealed(context.TODO(), p.SectorID, p.Offset.Unpadded(), p.Length.Unpadded()) + }, + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + pieceStore.ExpectPiece(expectedPieceCID, expectedPiece) + pieceStore.ExpectPiece(expectedPieceCID2, expectedPiece2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + query: retrievalmarket.Query{PayloadCID: 
payloadCID}, + expResp: retrievalmarket.QueryResponse{ + Status: retrievalmarket.QueryResponseAvailable, + PieceCIDFound: retrievalmarket.QueryItemAvailable, + Size: expectedSize2, + }, + expectedPricePerByte: expectedPricePerByte, + expectedPaymentInterval: expectedPaymentInterval, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedUnsealPrice: expectedUnsealDiscount, + }, + + {name: "When PieceCID is provided and both PieceCID and PayloadCID are found", + expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + loadPieceCIDS(t, pieceStore, payloadCID, expectedPieceCID) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID) + }, + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID}, + }, + expResp: retrievalmarket.QueryResponse{ + Status: retrievalmarket.QueryResponseAvailable, + PieceCIDFound: retrievalmarket.QueryItemAvailable, + Size: expectedSize, + }, + expectedPricePerByte: expectedPricePerByte, + expectedPaymentInterval: expectedPaymentInterval, + expectedPaymentIntervalIncrease: expectedPaymentIntervalIncrease, + expectedUnsealPrice: expectedUnsealPrice, + }, + {name: "When QueryParams has PieceCID and is missing", + expFunc: func(t *testing.T, ps *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) { + loadPieceCIDS(t, ps, payloadCID, cid.Undef) + ps.ExpectMissingPiece(expectedPieceCID) + ps.ExpectMissingPiece(expectedPieceCID2) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID2) + }, + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID}, + }, + expResp: retrievalmarket.QueryResponse{ + Status: retrievalmarket.QueryResponseUnavailable, + PieceCIDFound: retrievalmarket.QueryItemUnavailable, + Message: "piece info for cid not found (deal has not been added to 
a piece yet)", + }, + expectedPricePerByte: big.Zero(), + expectedPaymentInterval: 0, + expectedPaymentIntervalIncrease: 0, + expectedUnsealPrice: big.Zero(), + }, + {name: "When payload CID not found", + expFunc: func(t *testing.T, ps *tut.TestPieceStore, dagStore *tut.MockDagStoreWrapper) {}, + query: retrievalmarket.Query{ + PayloadCID: payloadCID, + QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID}, + }, + expResp: retrievalmarket.QueryResponse{ + Status: retrievalmarket.QueryResponseUnavailable, + PieceCIDFound: retrievalmarket.QueryItemUnavailable, + Message: "piece info for cid not found (deal has not been added to a piece yet)", + }, + expectedPricePerByte: big.Zero(), + expectedPaymentInterval: 0, + expectedPaymentIntervalIncrease: 0, + expectedUnsealPrice: big.Zero(), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + node := testnodes.NewTestRetrievalProviderNode() + sa := testnodes.NewTestSectorAccessor() + qs := readWriteQueryStream() + err := qs.WriteQuery(tc.query) + require.NoError(t, err) + pieceStore := tut.NewTestPieceStore() + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa) + if tc.sectorAccessorFunc != nil { + tc.sectorAccessorFunc(sa) + } + + tc.expFunc(t, pieceStore, dagStore) + + receiveStreamOnProvider(t, node, sa, qs, pieceStore, dagStore) + + actualResp, err := qs.ReadQueryResponse() + pieceStore.VerifyExpectations(t) + if tc.expErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expErr) + } + + tc.expResp.PaymentAddress = expectedAddress + tc.expResp.MinPricePerByte = tc.expectedPricePerByte + tc.expResp.MaxPaymentInterval = tc.expectedPaymentInterval + tc.expResp.MaxPaymentIntervalIncrease = tc.expectedPaymentIntervalIncrease + tc.expResp.UnsealPrice = tc.expectedUnsealPrice + assert.Equal(t, tc.expResp, actualResp) + }) + } + + t.Run("error reading piece", func(t *testing.T) { + node := testnodes.NewTestRetrievalProviderNode() + sa := 
testnodes.NewTestSectorAccessor() + + qs := readWriteQueryStream() + err := qs.WriteQuery(retrievalmarket.Query{ + PayloadCID: payloadCID, + }) + require.NoError(t, err) + pieceStore := tut.NewTestPieceStore() + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID) + + receiveStreamOnProvider(t, node, sa, qs, pieceStore, dagStore) + + response, err := qs.ReadQueryResponse() + require.NoError(t, err) + require.Equal(t, response.Status, retrievalmarket.QueryResponseError) + require.NotEmpty(t, response.Message) + }) + + t.Run("when ReadDealStatusRequest fails", func(t *testing.T) { + node := testnodes.NewTestRetrievalProviderNode() + sa := testnodes.NewTestSectorAccessor() + qs := readWriteQueryStream() + pieceStore := tut.NewTestPieceStore() + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa) + + receiveStreamOnProvider(t, node, sa, qs, pieceStore, dagStore) + + response, err := qs.ReadQueryResponse() + require.NotNil(t, err) + require.Equal(t, response, retrievalmarket.QueryResponseUndefined) + }) + + t.Run("when WriteDealStatusResponse fails", func(t *testing.T) { + node := testnodes.NewTestRetrievalProviderNode() + sa := testnodes.NewTestSectorAccessor() + qRead, qWrite := tut.QueryReadWriter() + qs := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ + PeerID: expectedPeer, + Reader: qRead, + Writer: qWrite, + RespWriter: tut.FailResponseWriter, + }) + err := qs.WriteQuery(retrievalmarket.Query{ + PayloadCID: payloadCID, + }) + require.NoError(t, err) + pieceStore := tut.NewTestPieceStore() + pieceStore.ExpectPiece(expectedPieceCID, expectedPiece) + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa) + dagStore.AddBlockToPieceIndex(payloadCID, expectedPieceCID) + + receiveStreamOnProvider(t, node, sa, qs, pieceStore, dagStore) + + pieceStore.VerifyExpectations(t) + }) + +} + +func TestProvider_Construct(t *testing.T) { + ds := datastore.NewMapDatastore() + pieceStore := 
tut.NewTestPieceStore()
	node := testnodes.NewTestRetrievalProviderNode()
	sa := testnodes.NewTestSectorAccessor()
	dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa)
	dt := tut.NewTestDataTransfer()

	// Trivial pricing function; this test only cares about construction.
	priceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) {
		ask := retrievalmarket.Ask{}
		return ask, nil
	}

	_, err := retrievalimpl.NewProvider(
		tut.NewIDAddr(t, 2344),
		node,
		sa,
		tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}),
		pieceStore,
		dagStore,
		dt,
		ds,
		priceFunc,
	)
	require.NoError(t, err)
	require.Len(t, dt.Subscribers, 1)

	// Voucher result types: current and migrated DealResponse.
	require.Len(t, dt.RegisteredVoucherResultTypes, 2)
	_, ok := dt.RegisteredVoucherResultTypes[0].(*retrievalmarket.DealResponse)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherResultTypes[1].(*migrations.DealResponse0)
	require.True(t, ok)

	// Voucher types: current and migrated DealProposal, both validated by
	// the provider request validator.
	require.Len(t, dt.RegisteredVoucherTypes, 2)
	_, ok = dt.RegisteredVoucherTypes[0].VoucherType.(*retrievalmarket.DealProposal)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherTypes[0].Validator.(*requestvalidation.ProviderRequestValidator)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherTypes[1].VoucherType.(*migrations.DealProposal0)
	require.True(t, ok)
	_, ok = dt.RegisteredVoucherTypes[1].Validator.(*requestvalidation.ProviderRequestValidator)
	require.True(t, ok)

	// Revalidators: current and migrated DealPayment.
	require.Len(t, dt.RegisteredRevalidators, 2)
	_, ok = dt.RegisteredRevalidators[0].VoucherType.(*retrievalmarket.DealPayment)
	require.True(t, ok)
	_, ok = dt.RegisteredRevalidators[0].Revalidator.(*requestvalidation.ProviderRevalidator)
	require.True(t, ok)
	_, ok = dt.RegisteredRevalidators[1].VoucherType.(*migrations.DealPayment0)
	require.True(t, ok)

	// Transport configurers: current and migrated DealProposal.
	// BUG FIX: the original assigned the [0] assertion result to ok and then
	// overwrote it with the [1] assertion before ever checking it, so the
	// first transport-configurer registration was never actually verified.
	require.Len(t, dt.RegisteredTransportConfigurers, 2)
	_, ok = dt.RegisteredTransportConfigurers[0].VoucherType.(*retrievalmarket.DealProposal)
	require.True(t, ok)
	_, ok = dt.RegisteredTransportConfigurers[1].VoucherType.(*migrations.DealProposal0)
	require.True(t, ok)
+} + +func TestProviderConfigOpts(t *testing.T) { + var sawOpt int + opt1 := func(p *retrievalimpl.Provider) { sawOpt++ } + opt2 := func(p *retrievalimpl.Provider) { sawOpt += 2 } + ds := datastore.NewMapDatastore() + pieceStore := tut.NewTestPieceStore() + node := testnodes.NewTestRetrievalProviderNode() + sa := testnodes.NewTestSectorAccessor() + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa) + + priceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := retrievalmarket.Ask{} + return ask, nil + } + + p, err := retrievalimpl.NewProvider( + tut.NewIDAddr(t, 2344), + node, + sa, + tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}), + pieceStore, + dagStore, + tut.NewTestDataTransfer(), + ds, priceFunc, opt1, opt2, + ) + require.NoError(t, err) + assert.NotNil(t, p) + assert.Equal(t, 3, sawOpt) + + // just test that we can create a DealDeciderOpt function and that it runs + // successfully in the constructor + ddOpt := retrievalimpl.DealDeciderOpt( + func(_ context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { + return true, "yes", nil + }) + + p, err = retrievalimpl.NewProvider( + tut.NewIDAddr(t, 2344), + testnodes.NewTestRetrievalProviderNode(), + testnodes.NewTestSectorAccessor(), + tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}), + tut.NewTestPieceStore(), + dagStore, + tut.NewTestDataTransfer(), + ds, priceFunc, ddOpt) + require.NoError(t, err) + require.NotNil(t, p) +} + +// loadPieceCIDS sets expectations to receive expectedPieceCID and 3 other random PieceCIDs to +// disinguish the case of a PayloadCID is found but the PieceCID is not +func loadPieceCIDS(t *testing.T, pieceStore *tut.TestPieceStore, expPayloadCID, expectedPieceCID cid.Cid) { + + otherPieceCIDs := tut.GenerateCids(3) + expectedSize := uint64(1234) + + blockLocs := make([]piecestore.PieceBlockLocation, 4) + expectedPieceInfo := piecestore.PieceInfo{ + PieceCID: 
expectedPieceCID,
		Deals: []piecestore.DealInfo{
			{
				Length: abi.PaddedPieceSize(expectedSize),
			},
		},
	}

	// Record block locations for the expected piece plus the three decoys.
	blockLocs[0] = piecestore.PieceBlockLocation{PieceCID: expectedPieceCID}
	for i, pieceCID := range otherPieceCIDs {
		blockLocs[i+1] = piecestore.PieceBlockLocation{PieceCID: pieceCID}
		// BUG FIX (dead code removed): the original also built a copy of
		// expectedPieceInfo with its PieceCID swapped to the decoy CID
		// (`pi := expectedPieceInfo; pi.PieceCID = pieceCID`) but never
		// used it, so those statements had no effect.
	}
	// cid.Undef signals the "piece missing" scenario; no piece expectation
	// is registered in that case.
	if expectedPieceCID != cid.Undef {
		pieceStore.ExpectPiece(expectedPieceCID, expectedPieceInfo)
	}
}

// TestProviderMigrations checks that deal state and the stored ask written
// in the legacy (version 0) formats are migrated when the provider starts.
func TestProviderMigrations(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	ds := dss.MutexWrap(datastore.NewMapDatastore())
	pieceStore := tut.NewTestPieceStore()
	node := testnodes.NewTestRetrievalProviderNode()
	sa := testnodes.NewTestSectorAccessor()
	dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa)
	dt := tut.NewTestDataTransfer()

	providerDs := namespace.Wrap(ds, datastore.NewKey("/retrievals/provider"))

	// Randomized fixture data for numDeals legacy deal records.
	numDeals := 5
	payloadCIDs := make([]cid.Cid, numDeals)
	iDs := make([]retrievalmarket.DealID, numDeals)
	pieceCIDs := make([]*cid.Cid, numDeals)
	pricePerBytes := make([]abi.TokenAmount, numDeals)
	paymentIntervals := make([]uint64, numDeals)
	paymentIntervalIncreases := make([]uint64, numDeals)
	unsealPrices := make([]abi.TokenAmount, numDeals)
	storeIDs := make([]uint64, numDeals)
	channelIDs := make([]datatransfer.ChannelID, numDeals)
	receivers := make([]peer.ID, numDeals)
	totalSents := make([]uint64, numDeals)
	messages := make([]string, numDeals)
	currentIntervals := make([]uint64, numDeals)
	fundsReceiveds := make([]abi.TokenAmount, numDeals)
	selfPeer := tut.GeneratePeers(1)[0]
	dealIDs := make([]abi.DealID, numDeals)
	sectorIDs := make([]abi.SectorNumber, numDeals)
	offsets := make([]abi.PaddedPieceSize, numDeals)
	lengths := make([]abi.PaddedPieceSize, numDeals)
	allSelectorBuf := new(bytes.Buffer)
	err := dagcbor.Encode(selectorparse.CommonSelector_ExploreAllRecursively,
allSelectorBuf) + require.NoError(t, err) + allSelectorBytes := allSelectorBuf.Bytes() + + for i := 0; i < numDeals; i++ { + payloadCIDs[i] = tut.GenerateCids(1)[0] + iDs[i] = retrievalmarket.DealID(rand.Uint64()) + pieceCID := tut.GenerateCids(1)[0] + pieceCIDs[i] = &pieceCID + pricePerBytes[i] = big.NewInt(rand.Int63()) + paymentIntervals[i] = rand.Uint64() + paymentIntervalIncreases[i] = rand.Uint64() + unsealPrices[i] = big.NewInt(rand.Int63()) + storeIDs[i] = rand.Uint64() + receivers[i] = tut.GeneratePeers(1)[0] + channelIDs[i] = datatransfer.ChannelID{ + Responder: selfPeer, + Initiator: receivers[i], + ID: datatransfer.TransferID(rand.Uint64()), + } + totalSents[i] = rand.Uint64() + messages[i] = string(tut.RandomBytes(20)) + currentIntervals[i] = rand.Uint64() + fundsReceiveds[i] = big.NewInt(rand.Int63()) + dealIDs[i] = abi.DealID(rand.Uint64()) + sectorIDs[i] = abi.SectorNumber(rand.Uint64()) + offsets[i] = abi.PaddedPieceSize(rand.Uint64()) + lengths[i] = abi.PaddedPieceSize(rand.Uint64()) + deal := migrations.ProviderDealState0{ + DealProposal0: migrations.DealProposal0{ + PayloadCID: payloadCIDs[i], + ID: iDs[i], + Params0: migrations.Params0{ + Selector: &cbg.Deferred{ + Raw: allSelectorBytes, + }, + PieceCID: pieceCIDs[i], + PricePerByte: pricePerBytes[i], + PaymentInterval: paymentIntervals[i], + PaymentIntervalIncrease: paymentIntervalIncreases[i], + UnsealPrice: unsealPrices[i], + }, + }, + StoreID: storeIDs[i], + ChannelID: channelIDs[i], + PieceInfo: &piecemigrations.PieceInfo0{ + PieceCID: pieceCID, + Deals: []piecemigrations.DealInfo0{ + { + DealID: dealIDs[i], + SectorID: sectorIDs[i], + Offset: offsets[i], + Length: lengths[i], + }, + }, + }, + Status: retrievalmarket.DealStatusCompleted, + Receiver: receivers[i], + TotalSent: totalSents[i], + Message: messages[i], + CurrentInterval: currentIntervals[i], + FundsReceived: fundsReceiveds[i], + } + buf := new(bytes.Buffer) + err := deal.MarshalCBOR(buf) + require.NoError(t, err) + err = 
providerDs.Put(ctx, datastore.NewKey(fmt.Sprint(deal.ID)), buf.Bytes()) + require.NoError(t, err) + } + oldAsk := &migrations.Ask0{ + PricePerByte: abi.NewTokenAmount(rand.Int63()), + UnsealPrice: abi.NewTokenAmount(rand.Int63()), + PaymentInterval: rand.Uint64(), + PaymentIntervalIncrease: rand.Uint64(), + } + askBuf := new(bytes.Buffer) + err = oldAsk.MarshalCBOR(askBuf) + require.NoError(t, err) + err = providerDs.Put(ctx, datastore.NewKey("retrieval-ask"), askBuf.Bytes()) + require.NoError(t, err) + + priceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := retrievalmarket.Ask{} + return ask, nil + } + + retrievalProvider, err := retrievalimpl.NewProvider( + tut.NewIDAddr(t, 2344), + node, + sa, + tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}), + pieceStore, + dagStore, + dt, + providerDs, + priceFunc, + ) + require.NoError(t, err) + tut.StartAndWaitForReady(ctx, t, retrievalProvider) + deals := retrievalProvider.ListDeals() + require.NoError(t, err) + for i := 0; i < numDeals; i++ { + deal, ok := deals[retrievalmarket.ProviderDealIdentifier{Receiver: receivers[i], DealID: iDs[i]}] + require.True(t, ok) + expectedDeal := retrievalmarket.ProviderDealState{ + DealProposal: retrievalmarket.DealProposal{ + PayloadCID: payloadCIDs[i], + ID: iDs[i], + Params: retrievalmarket.Params{ + Selector: &cbg.Deferred{ + Raw: allSelectorBytes, + }, + PieceCID: pieceCIDs[i], + PricePerByte: pricePerBytes[i], + PaymentInterval: paymentIntervals[i], + PaymentIntervalIncrease: paymentIntervalIncreases[i], + UnsealPrice: unsealPrices[i], + }, + }, + StoreID: storeIDs[i], + ChannelID: &channelIDs[i], + PieceInfo: &piecestore.PieceInfo{ + PieceCID: *pieceCIDs[i], + Deals: []piecestore.DealInfo{ + { + DealID: dealIDs[i], + SectorID: sectorIDs[i], + Offset: offsets[i], + Length: lengths[i], + }, + }, + }, + Status: retrievalmarket.DealStatusCompleted, + Receiver: receivers[i], + TotalSent: totalSents[i], 
+ Message: messages[i], + CurrentInterval: currentIntervals[i], + FundsReceived: fundsReceiveds[i], + LegacyProtocol: true, + } + require.Equal(t, expectedDeal, deal) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/doc.go new file mode 100644 index 00000000000..8a350ba7cb2 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/doc.go @@ -0,0 +1,13 @@ +/* +Package providerstates contains state machine logic relating to the `RetrievalProvider`. + +provider_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. + +provider_states.go contains state handler functions. + +The following diagram illustrates the operation of the provider state machine. This diagram is auto-generated from current code and should remain up to date over time: + +https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg + +*/ +package providerstates diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_fsm.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_fsm.go new file mode 100644 index 00000000000..2f92566ffbb --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_fsm.go @@ -0,0 +1,141 @@ +package providerstates + +import ( + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +func recordError(deal *rm.ProviderDealState, err error) error { + deal.Message = err.Error() + return nil +} + +// ProviderEvents are the events that can happen in a retrieval provider +var 
ProviderEvents = fsm.Events{ + // receiving new deal + fsm.Event(rm.ProviderEventOpen). + From(rm.DealStatusNew).ToNoChange(). + Action( + func(deal *rm.ProviderDealState) error { + deal.TotalSent = 0 + deal.FundsReceived = abi.NewTokenAmount(0) + deal.CurrentInterval = deal.PaymentInterval + return nil + }, + ), + + // accepting + fsm.Event(rm.ProviderEventDealAccepted). + From(rm.DealStatusFundsNeededUnseal).ToNoChange(). + From(rm.DealStatusNew).To(rm.DealStatusUnsealing). + Action(func(deal *rm.ProviderDealState, channelID datatransfer.ChannelID) error { + deal.ChannelID = &channelID + return nil + }), + + //unsealing + fsm.Event(rm.ProviderEventUnsealError). + From(rm.DealStatusUnsealing).To(rm.DealStatusFailing). + Action(recordError), + fsm.Event(rm.ProviderEventUnsealComplete). + From(rm.DealStatusUnsealing).To(rm.DealStatusUnsealed), + + // receiving blocks + fsm.Event(rm.ProviderEventBlockSent). + FromMany(rm.DealStatusOngoing).ToNoChange(). + From(rm.DealStatusUnsealed).To(rm.DealStatusOngoing). + Action(func(deal *rm.ProviderDealState, totalSent uint64) error { + deal.TotalSent = totalSent + return nil + }), + fsm.Event(rm.ProviderEventBlocksCompleted). + FromMany(rm.DealStatusOngoing).To(rm.DealStatusBlocksComplete), + + // request payment + fsm.Event(rm.ProviderEventPaymentRequested). + FromMany(rm.DealStatusOngoing, rm.DealStatusUnsealed).To(rm.DealStatusFundsNeeded). + From(rm.DealStatusFundsNeeded).ToJustRecord(). + From(rm.DealStatusBlocksComplete).To(rm.DealStatusFundsNeededLastPayment). + From(rm.DealStatusNew).To(rm.DealStatusFundsNeededUnseal). + Action(func(deal *rm.ProviderDealState, totalSent uint64) error { + deal.TotalSent = totalSent + return nil + }), + + // receive and process payment + fsm.Event(rm.ProviderEventSaveVoucherFailed). + FromMany(rm.DealStatusFundsNeeded, rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFailing). + Action(recordError), + fsm.Event(rm.ProviderEventPartialPaymentReceived). 
+ FromMany(rm.DealStatusFundsNeeded, rm.DealStatusFundsNeededLastPayment).ToNoChange(). + Action(func(deal *rm.ProviderDealState, fundsReceived abi.TokenAmount) error { + deal.FundsReceived = big.Add(deal.FundsReceived, fundsReceived) + return nil + }), + fsm.Event(rm.ProviderEventPaymentReceived). + From(rm.DealStatusFundsNeeded).To(rm.DealStatusOngoing). + From(rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFinalizing). + From(rm.DealStatusFundsNeededUnseal).To(rm.DealStatusUnsealing). + FromMany(rm.DealStatusBlocksComplete, rm.DealStatusOngoing, rm.DealStatusFinalizing).ToJustRecord(). + Action(func(deal *rm.ProviderDealState, fundsReceived abi.TokenAmount) error { + deal.FundsReceived = big.Add(deal.FundsReceived, fundsReceived) + + // only update interval if the payment is for bytes and not for unsealing. + if deal.Status != rm.DealStatusFundsNeededUnseal { + deal.CurrentInterval = deal.NextInterval() + } + return nil + }), + + // completing + fsm.Event(rm.ProviderEventComplete).FromMany(rm.DealStatusBlocksComplete, rm.DealStatusFinalizing).To(rm.DealStatusCompleting), + fsm.Event(rm.ProviderEventCleanupComplete).From(rm.DealStatusCompleting).To(rm.DealStatusCompleted), + + // Cancellation / Error cleanup + fsm.Event(rm.ProviderEventCancelComplete). + From(rm.DealStatusCancelling).To(rm.DealStatusCancelled). + From(rm.DealStatusFailing).To(rm.DealStatusErrored), + + // data transfer errors + fsm.Event(rm.ProviderEventDataTransferError). + FromAny().To(rm.DealStatusErrored). + Action(recordError), + + // multistore errors + fsm.Event(rm.ProviderEventMultiStoreError). + FromAny().To(rm.DealStatusErrored). + Action(recordError), + + fsm.Event(rm.ProviderEventClientCancelled). + From(rm.DealStatusFailing).ToJustRecord(). + From(rm.DealStatusCancelling).ToJustRecord(). 
+ FromAny().To(rm.DealStatusCancelling).Action( + func(deal *rm.ProviderDealState) error { + if deal.Status != rm.DealStatusFailing { + deal.Message = "Client cancelled retrieval" + } + return nil + }, + ), +} + +// ProviderStateEntryFuncs are the handlers for different states in a retrieval provider +var ProviderStateEntryFuncs = fsm.StateEntryFuncs{ + rm.DealStatusFundsNeededUnseal: TrackTransfer, + rm.DealStatusUnsealing: UnsealData, + rm.DealStatusUnsealed: UnpauseDeal, + rm.DealStatusFailing: CancelDeal, + rm.DealStatusCancelling: CancelDeal, + rm.DealStatusCompleting: CleanupDeal, +} + +// ProviderFinalityStates are the terminal states for a retrieval provider +var ProviderFinalityStates = []fsm.StateKey{ + rm.DealStatusErrored, + rm.DealStatusCompleted, + rm.DealStatusCancelled, +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_states.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_states.go new file mode 100644 index 00000000000..1e33f0c09a0 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_states.go @@ -0,0 +1,99 @@ +package providerstates + +import ( + "context" + "errors" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-statemachine" + "github.com/filecoin-project/go-statemachine/fsm" + + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +var log = logging.Logger("retrieval-fsm") + +// ProviderDealEnvironment is a bridge to the environment a provider deal is executing in +// It provides access to relevant functionality on the retrieval provider +type ProviderDealEnvironment interface { + // Node returns the node interface for this deal + Node() rm.RetrievalProviderNode + PrepareBlockstore(ctx context.Context, dealID rm.DealID, pieceCid cid.Cid) error + TrackTransfer(deal 
rm.ProviderDealState) error + UntrackTransfer(deal rm.ProviderDealState) error + DeleteStore(dealID rm.DealID) error + ResumeDataTransfer(context.Context, datatransfer.ChannelID) error + CloseDataTransfer(context.Context, datatransfer.ChannelID) error +} + +// UnsealData fetches the piece containing data needed for the retrieval, +// unsealing it if necessary +func UnsealData(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { + if err := environment.PrepareBlockstore(ctx.Context(), deal.ID, deal.PieceInfo.PieceCID); err != nil { + return ctx.Trigger(rm.ProviderEventUnsealError, err) + } + log.Debugf("blockstore prepared successfully, firing unseal complete for deal %d", deal.ID) + return ctx.Trigger(rm.ProviderEventUnsealComplete) +} + +// TrackTransfer resumes a deal so we can start sending data after its unsealed +func TrackTransfer(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { + err := environment.TrackTransfer(deal) + if err != nil { + return ctx.Trigger(rm.ProviderEventDataTransferError, err) + } + return nil +} + +// UnpauseDeal resumes a deal so we can start sending data after its unsealed +func UnpauseDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { + log.Debugf("unpausing data transfer for deal %d", deal.ID) + err := environment.TrackTransfer(deal) + if err != nil { + return ctx.Trigger(rm.ProviderEventDataTransferError, err) + } + if deal.ChannelID != nil { + log.Debugf("resuming data transfer for deal %d", deal.ID) + err = environment.ResumeDataTransfer(ctx.Context(), *deal.ChannelID) + if err != nil { + return ctx.Trigger(rm.ProviderEventDataTransferError, err) + } + } + return nil +} + +// CancelDeal clears a deal that went wrong for an unknown reason +func CancelDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { + // Read next response (or fail) + err := 
environment.UntrackTransfer(deal) + if err != nil { + return ctx.Trigger(rm.ProviderEventDataTransferError, err) + } + err = environment.DeleteStore(deal.ID) + if err != nil { + return ctx.Trigger(rm.ProviderEventMultiStoreError, err) + } + if deal.ChannelID != nil { + err = environment.CloseDataTransfer(ctx.Context(), *deal.ChannelID) + if err != nil && !errors.Is(err, statemachine.ErrTerminated) { + return ctx.Trigger(rm.ProviderEventDataTransferError, err) + } + } + return ctx.Trigger(rm.ProviderEventCancelComplete) +} + +// CleanupDeal runs to do memory cleanup for an in progress deal +func CleanupDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { + err := environment.UntrackTransfer(deal) + if err != nil { + return ctx.Trigger(rm.ProviderEventDataTransferError, err) + } + err = environment.DeleteStore(deal.ID) + if err != nil { + return ctx.Trigger(rm.ProviderEventMultiStoreError, err) + } + return ctx.Trigger(rm.ProviderEventCleanupComplete) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_states_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_states_test.go new file mode 100644 index 00000000000..49a9e554e51 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/providerstates/provider_states_test.go @@ -0,0 +1,275 @@ +package providerstates_test + +import ( + "context" + "errors" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + fsmtest "github.com/filecoin-project/go-statemachine/fsm/testutil" + + "github.com/filecoin-project/go-fil-markets/piecestore" + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" + 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" + rmtesting "github.com/filecoin-project/go-fil-markets/retrievalmarket/testing" + testnet "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestUnsealData(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(rm.ProviderDealState{}, "Status", providerstates.ProviderEvents) + require.NoError(t, err) + runUnsealData := func(t *testing.T, + node *testnodes.TestRetrievalProviderNode, + setupEnv func(e *rmtesting.TestProviderDealEnvironment), + dealState *rm.ProviderDealState) { + environment := rmtesting.NewTestProviderDealEnvironment(node) + setupEnv(environment) + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := providerstates.UnsealData(fsmCtx, environment, *dealState) + require.NoError(t, err) + node.VerifyExpectations(t) + fsmCtx.ReplayEvents(t, dealState) + } + + expectedPiece := testnet.GenerateCids(1)[0] + proposal := rm.DealProposal{ + ID: rm.DealID(10), + PayloadCID: expectedPiece, + Params: rm.NewParamsV0(defaultPricePerByte, defaultCurrentInterval, defaultIntervalIncrease), + } + + pieceCid := testnet.GenerateCids(1)[0] + + sectorID := abi.SectorNumber(rand.Uint64()) + offset := abi.PaddedPieceSize(rand.Uint64()) + length := abi.PaddedPieceSize(rand.Uint64()) + + sectorID2 := abi.SectorNumber(rand.Uint64()) + offset2 := abi.PaddedPieceSize(rand.Uint64()) + length2 := abi.PaddedPieceSize(rand.Uint64()) + + makeDeals := func() *rm.ProviderDealState { + return &rm.ProviderDealState{ + DealProposal: proposal, + Status: rm.DealStatusUnsealing, + PieceInfo: &piecestore.PieceInfo{ + PieceCID: pieceCid, + Deals: []piecestore.DealInfo{ + { + DealID: abi.DealID(rand.Uint64()), + SectorID: sectorID, + Offset: offset, + Length: length, + }, + { + DealID: abi.DealID(rand.Uint64()), + SectorID: sectorID2, + Offset: offset2, + Length: length2, + 
}, + }, + }, + TotalSent: 0, + FundsReceived: abi.NewTokenAmount(0), + } + } + + t.Run("unseals successfully", func(t *testing.T) { + node := testnodes.NewTestRetrievalProviderNode() + dealState := makeDeals() + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) {} + runUnsealData(t, node, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusUnsealed) + }) + + t.Run("PrepareBlockstore error", func(t *testing.T) { + node := testnodes.NewTestRetrievalProviderNode() + dealState := makeDeals() + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.PrepareBlockstoreError = errors.New("Something went wrong") + } + runUnsealData(t, node, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusFailing) + require.Equal(t, dealState.Message, "Something went wrong") + }) +} + +func TestUnpauseDeal(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(rm.ProviderDealState{}, "Status", providerstates.ProviderEvents) + require.NoError(t, err) + runUnpauseDeal := func(t *testing.T, + setupEnv func(e *rmtesting.TestProviderDealEnvironment), + dealState *rm.ProviderDealState) { + node := testnodes.NewTestRetrievalProviderNode() + environment := rmtesting.NewTestProviderDealEnvironment(node) + setupEnv(environment) + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + dealState.ChannelID = &datatransfer.ChannelID{ + Initiator: "initiator", + Responder: dealState.Receiver, + ID: 1, + } + err := providerstates.UnpauseDeal(fsmCtx, environment, *dealState) + require.NoError(t, err) + node.VerifyExpectations(t) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("it works", func(t *testing.T) { + dealState := makeDealState(rm.DealStatusUnsealed) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) {} + runUnpauseDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusUnsealed) + }) + t.Run("error tracking channel", func(t *testing.T) { + dealState := 
makeDealState(rm.DealStatusUnsealed) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.TrackTransferError = errors.New("something went wrong tracking") + } + runUnpauseDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "something went wrong tracking") + }) + t.Run("error resuming channel", func(t *testing.T) { + dealState := makeDealState(rm.DealStatusUnsealed) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.ResumeDataTransferError = errors.New("something went wrong resuming") + } + runUnpauseDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "something went wrong resuming") + }) +} + +func TestCancelDeal(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(rm.ProviderDealState{}, "Status", providerstates.ProviderEvents) + require.NoError(t, err) + runCancelDeal := func(t *testing.T, + setupEnv func(e *rmtesting.TestProviderDealEnvironment), + dealState *rm.ProviderDealState) { + node := testnodes.NewTestRetrievalProviderNode() + environment := rmtesting.NewTestProviderDealEnvironment(node) + setupEnv(environment) + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + dealState.ChannelID = &datatransfer.ChannelID{ + Initiator: "initiator", + Responder: dealState.Receiver, + ID: 1, + } + err := providerstates.CancelDeal(fsmCtx, environment, *dealState) + require.NoError(t, err) + node.VerifyExpectations(t) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("it works", func(t *testing.T) { + dealState := makeDealState(rm.DealStatusFailing) + dealState.Message = "Existing error" + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) {} + runCancelDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "Existing error") + }) + t.Run("error untracking channel", func(t 
*testing.T) { + dealState := makeDealState(rm.DealStatusFailing) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.UntrackTransferError = errors.New("something went wrong untracking") + } + runCancelDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "something went wrong untracking") + }) + t.Run("error deleting store", func(t *testing.T) { + dealState := makeDealState(rm.DealStatusFailing) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.DeleteStoreError = errors.New("something went wrong deleting store") + } + runCancelDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "something went wrong deleting store") + }) + t.Run("error closing channel", func(t *testing.T) { + dealState := makeDealState(rm.DealStatusFailing) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.CloseDataTransferError = errors.New("something went wrong closing") + } + runCancelDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "something went wrong closing") + }) +} + +func TestCleanupDeal(t *testing.T) { + ctx := context.Background() + eventMachine, err := fsm.NewEventProcessor(rm.ProviderDealState{}, "Status", providerstates.ProviderEvents) + require.NoError(t, err) + runCleanupDeal := func(t *testing.T, + setupEnv func(e *rmtesting.TestProviderDealEnvironment), + dealState *rm.ProviderDealState) { + node := testnodes.NewTestRetrievalProviderNode() + environment := rmtesting.NewTestProviderDealEnvironment(node) + setupEnv(environment) + fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) + err := providerstates.CleanupDeal(fsmCtx, environment, *dealState) + require.NoError(t, err) + node.VerifyExpectations(t) + fsmCtx.ReplayEvents(t, dealState) + } + + t.Run("it works", func(t *testing.T) { + dealState := 
makeDealState(rm.DealStatusCompleting) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) {} + runCleanupDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusCompleted) + }) + t.Run("error untracking channel", func(t *testing.T) { + dealState := makeDealState(rm.DealStatusCompleting) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.UntrackTransferError = errors.New("something went wrong untracking") + } + runCleanupDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "something went wrong untracking") + }) + t.Run("error deleting store", func(t *testing.T) { + dealState := makeDealState(rm.DealStatusCompleting) + setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { + fe.DeleteStoreError = errors.New("something went wrong deleting store") + } + runCleanupDeal(t, setupEnv, dealState) + require.Equal(t, dealState.Status, rm.DealStatusErrored) + require.Equal(t, dealState.Message, "something went wrong deleting store") + }) + +} + +var dealID = rm.DealID(10) +var defaultCurrentInterval = uint64(1000) +var defaultIntervalIncrease = uint64(500) +var defaultPricePerByte = abi.NewTokenAmount(500) +var defaultPaymentPerInterval = big.Mul(defaultPricePerByte, abi.NewTokenAmount(int64(defaultCurrentInterval))) +var defaultTotalSent = uint64(5000) +var defaultFundsReceived = abi.NewTokenAmount(2500000) + +func makeDealState(status rm.DealStatus) *rm.ProviderDealState { + return &rm.ProviderDealState{ + Status: status, + TotalSent: defaultTotalSent, + CurrentInterval: defaultCurrentInterval, + FundsReceived: defaultFundsReceived, + DealProposal: rm.DealProposal{ + ID: dealID, + Params: rm.NewParamsV0(defaultPricePerByte, defaultCurrentInterval, defaultIntervalIncrease), + }, + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/requestvalidation.go 
b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/requestvalidation.go new file mode 100644 index 00000000000..95a8fed101b --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/requestvalidation.go @@ -0,0 +1,197 @@ +package requestvalidation + +import ( + "bytes" + "context" + "errors" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + peer "github.com/libp2p/go-libp2p-core/peer" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" +) + +var allSelectorBytes []byte + +var askTimeout = 5 * time.Second + +func init() { + buf := new(bytes.Buffer) + _ = dagcbor.Encode(selectorparse.CommonSelector_ExploreAllRecursively, buf) + allSelectorBytes = buf.Bytes() +} + +// ValidationEnvironment contains the dependencies needed to validate deals +type ValidationEnvironment interface { + GetAsk(ctx context.Context, payloadCid cid.Cid, pieceCid *cid.Cid, piece piecestore.PieceInfo, isUnsealed bool, client peer.ID) (retrievalmarket.Ask, error) + + GetPiece(c cid.Cid, pieceCID *cid.Cid) (piecestore.PieceInfo, bool, error) + // CheckDealParams verifies the given deal params are acceptable + CheckDealParams(ask retrievalmarket.Ask, pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64, unsealPrice abi.TokenAmount) error + // RunDealDecisioningLogic runs custom deal decision logic to decide if a deal is accepted, if present + RunDealDecisioningLogic(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) + // 
StateMachines returns the FSM Group to begin tracking with + BeginTracking(pds retrievalmarket.ProviderDealState) error +} + +// ProviderRequestValidator validates incoming requests for the Retrieval Provider +type ProviderRequestValidator struct { + env ValidationEnvironment +} + +// NewProviderRequestValidator returns a new instance of the ProviderRequestValidator +func NewProviderRequestValidator(env ValidationEnvironment) *ProviderRequestValidator { + return &ProviderRequestValidator{env} +} + +// ValidatePush validates a push request received from the peer that will send data +func (rv *ProviderRequestValidator) ValidatePush(isRestart bool, _ datatransfer.ChannelID, sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + return nil, errors.New("No pushes accepted") +} + +// ValidatePull validates a pull request received from the peer that will receive data +func (rv *ProviderRequestValidator) ValidatePull(isRestart bool, _ datatransfer.ChannelID, receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + proposal, ok := voucher.(*retrievalmarket.DealProposal) + var legacyProtocol bool + if !ok { + legacyProposal, ok := voucher.(*migrations.DealProposal0) + if !ok { + return nil, errors.New("wrong voucher type") + } + newProposal := migrations.MigrateDealProposal0To1(*legacyProposal) + proposal = &newProposal + legacyProtocol = true + } + response, err := rv.validatePull(isRestart, receiver, proposal, legacyProtocol, baseCid, selector) + if response == nil { + return nil, err + } + if legacyProtocol { + downgradedResponse := migrations.DealResponse0{ + Status: response.Status, + ID: response.ID, + Message: response.Message, + PaymentOwed: response.PaymentOwed, + } + return &downgradedResponse, err + } + return response, err +} + +// validatePull is called by the data provider when a new graphsync pull +// request is created. 
This can be the initial pull request or a new request +// created when the data transfer is restarted (eg after a connection failure). +// By default the graphsync request starts immediately sending data, unless +// validatePull returns ErrPause or the data-transfer has not yet started +// (because the provider is still unsealing the data). +func (rv *ProviderRequestValidator) validatePull(isRestart bool, receiver peer.ID, proposal *retrievalmarket.DealProposal, legacyProtocol bool, baseCid cid.Cid, selector ipld.Node) (*retrievalmarket.DealResponse, error) { + // Check the proposal CID matches + if proposal.PayloadCID != baseCid { + return nil, errors.New("incorrect CID for this proposal") + } + + // Check the proposal selector matches + buf := new(bytes.Buffer) + err := dagcbor.Encode(selector, buf) + if err != nil { + return nil, err + } + bytesCompare := allSelectorBytes + if proposal.SelectorSpecified() { + bytesCompare = proposal.Selector.Raw + } + if !bytes.Equal(buf.Bytes(), bytesCompare) { + return nil, errors.New("incorrect selector for this proposal") + } + + // If the validation is for a restart request, return nil, which means + // the data-transfer should not be explicitly paused or resumed + if isRestart { + return nil, nil + } + + // This is a new graphsync request (not a restart) + pds := retrievalmarket.ProviderDealState{ + DealProposal: *proposal, + Receiver: receiver, + LegacyProtocol: legacyProtocol, + CurrentInterval: proposal.PaymentInterval, + } + + // Decide whether to accept the deal + status, err := rv.acceptDeal(&pds) + + response := retrievalmarket.DealResponse{ + ID: proposal.ID, + Status: status, + } + + if status == retrievalmarket.DealStatusFundsNeededUnseal { + response.PaymentOwed = pds.UnsealPrice + } + + if err != nil { + response.Message = err.Error() + return &response, err + } + + err = rv.env.BeginTracking(pds) + if err != nil { + return nil, err + } + + // Pause the data transfer while unsealing the data. 
+ // The state machine will unpause the transfer when unsealing completes. + return &response, datatransfer.ErrPause +} + +func (rv *ProviderRequestValidator) acceptDeal(deal *retrievalmarket.ProviderDealState) (retrievalmarket.DealStatus, error) { + pieceInfo, isUnsealed, err := rv.env.GetPiece(deal.PayloadCID, deal.PieceCID) + if err != nil { + if err == retrievalmarket.ErrNotFound { + return retrievalmarket.DealStatusDealNotFound, err + } + return retrievalmarket.DealStatusErrored, err + } + + ctx, cancel := context.WithTimeout(context.TODO(), askTimeout) + defer cancel() + + ask, err := rv.env.GetAsk(ctx, deal.PayloadCID, deal.PieceCID, pieceInfo, isUnsealed, deal.Receiver) + if err != nil { + return retrievalmarket.DealStatusErrored, err + } + + // check that the deal parameters match our required parameters or + // reject outright + err = rv.env.CheckDealParams(ask, deal.PricePerByte, deal.PaymentInterval, deal.PaymentIntervalIncrease, deal.UnsealPrice) + if err != nil { + return retrievalmarket.DealStatusRejected, err + } + + accepted, reason, err := rv.env.RunDealDecisioningLogic(context.TODO(), *deal) + if err != nil { + return retrievalmarket.DealStatusErrored, err + } + if !accepted { + return retrievalmarket.DealStatusRejected, errors.New(reason) + } + + deal.PieceInfo = &pieceInfo + + if deal.UnsealPrice.GreaterThan(big.Zero()) { + return retrievalmarket.DealStatusFundsNeededUnseal, nil + } + + return retrievalmarket.DealStatusAccepted, nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/requestvalidation_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/requestvalidation_test.go new file mode 100644 index 00000000000..ae4dfe99a67 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/requestvalidation_test.go @@ -0,0 +1,244 @@ +package requestvalidation_test + +import ( + "context" + "errors" + "testing" + + 
"github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestValidatePush(t *testing.T) { + fve := &fakeValidationEnvironment{} + sender := shared_testutil.GeneratePeers(1)[0] + voucher := shared_testutil.MakeTestDealProposal() + requestValidator := requestvalidation.NewProviderRequestValidator(fve) + voucherResult, err := requestValidator.ValidatePush(false, datatransfer.ChannelID{}, sender, &voucher, voucher.PayloadCID, selectorparse.CommonSelector_ExploreAllRecursively) + require.Equal(t, nil, voucherResult) + require.Error(t, err) +} + +func TestValidatePull(t *testing.T) { + proposal := shared_testutil.MakeTestDealProposal() + legacyProposal := migrations.DealProposal0{ + PayloadCID: proposal.PayloadCID, + ID: proposal.ID, + Params0: migrations.Params0{ + Selector: proposal.Selector, + PieceCID: proposal.PieceCID, + PricePerByte: proposal.PricePerByte, + PaymentInterval: proposal.PaymentInterval, + PaymentIntervalIncrease: proposal.PaymentIntervalIncrease, + UnsealPrice: proposal.UnsealPrice, + }, + } + testCases := map[string]struct { + isRestart bool + fve fakeValidationEnvironment + sender peer.ID + voucher datatransfer.Voucher + baseCid cid.Cid + selector ipld.Node + expectedVoucherResult datatransfer.VoucherResult + 
expectedError error + }{ + "not a retrieval voucher": { + expectedError: errors.New("wrong voucher type"), + }, + "proposal and base cid do not match": { + baseCid: shared_testutil.GenerateCids(1)[0], + voucher: &proposal, + expectedError: errors.New("incorrect CID for this proposal"), + }, + "proposal and selector do not match": { + baseCid: proposal.PayloadCID, + selector: builder.NewSelectorSpecBuilder(basicnode.Prototype.Any).Matcher().Node(), + voucher: &proposal, + expectedError: errors.New("incorrect selector for this proposal"), + }, + "get piece other err": { + fve: fakeValidationEnvironment{ + RunDealDecisioningLogicAccepted: true, + GetPieceErr: errors.New("something went wrong"), + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: errors.New("something went wrong"), + expectedVoucherResult: &retrievalmarket.DealResponse{ + Status: retrievalmarket.DealStatusErrored, + ID: proposal.ID, + Message: "something went wrong", + }, + }, + "get piece not found err": { + fve: fakeValidationEnvironment{ + RunDealDecisioningLogicAccepted: true, + GetPieceErr: retrievalmarket.ErrNotFound, + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: retrievalmarket.ErrNotFound, + expectedVoucherResult: &retrievalmarket.DealResponse{ + Status: retrievalmarket.DealStatusDealNotFound, + ID: proposal.ID, + Message: retrievalmarket.ErrNotFound.Error(), + }, + }, + "check deal params err": { + fve: fakeValidationEnvironment{ + CheckDealParamsError: errors.New("something went wrong"), + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: errors.New("something went wrong"), + expectedVoucherResult: &retrievalmarket.DealResponse{ + Status: retrievalmarket.DealStatusRejected, + ID: proposal.ID, + Message: "something went wrong", 
+ }, + }, + "run deal decioning error": { + fve: fakeValidationEnvironment{ + RunDealDecisioningLogicError: errors.New("something went wrong"), + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: errors.New("something went wrong"), + expectedVoucherResult: &retrievalmarket.DealResponse{ + Status: retrievalmarket.DealStatusErrored, + ID: proposal.ID, + Message: "something went wrong", + }, + }, + "run deal decioning rejected": { + fve: fakeValidationEnvironment{ + RunDealDecisioningLogicFailReason: "something went wrong", + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: errors.New("something went wrong"), + expectedVoucherResult: &retrievalmarket.DealResponse{ + Status: retrievalmarket.DealStatusRejected, + ID: proposal.ID, + Message: "something went wrong", + }, + }, + "begin tracking error": { + fve: fakeValidationEnvironment{ + BeginTrackingError: errors.New("everything is awful"), + RunDealDecisioningLogicAccepted: true, + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: errors.New("everything is awful"), + }, + "success": { + fve: fakeValidationEnvironment{ + RunDealDecisioningLogicAccepted: true, + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: datatransfer.ErrPause, + expectedVoucherResult: &retrievalmarket.DealResponse{ + Status: retrievalmarket.DealStatusAccepted, + ID: proposal.ID, + }, + }, + "success, legacyProposal": { + fve: fakeValidationEnvironment{ + RunDealDecisioningLogicAccepted: true, + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &legacyProposal, + expectedError: datatransfer.ErrPause, + expectedVoucherResult: 
&migrations.DealResponse0{ + Status: retrievalmarket.DealStatusAccepted, + ID: proposal.ID, + }, + }, + "restart": { + isRestart: true, + fve: fakeValidationEnvironment{ + RunDealDecisioningLogicAccepted: true, + }, + baseCid: proposal.PayloadCID, + selector: selectorparse.CommonSelector_ExploreAllRecursively, + voucher: &proposal, + expectedError: nil, + expectedVoucherResult: nil, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + requestValidator := requestvalidation.NewProviderRequestValidator(&data.fve) + voucherResult, err := requestValidator.ValidatePull(data.isRestart, datatransfer.ChannelID{}, data.sender, data.voucher, data.baseCid, data.selector) + require.Equal(t, data.expectedVoucherResult, voucherResult) + if data.expectedError == nil { + require.NoError(t, err) + } else { + require.Error(t, err) + require.EqualError(t, err, data.expectedError.Error()) + } + }) + } +} + +type fakeValidationEnvironment struct { + IsUnsealedPiece bool + PieceInfo piecestore.PieceInfo + GetPieceErr error + CheckDealParamsError error + RunDealDecisioningLogicAccepted bool + RunDealDecisioningLogicFailReason string + RunDealDecisioningLogicError error + BeginTrackingError error + + Ask retrievalmarket.Ask +} + +func (fve *fakeValidationEnvironment) GetAsk(ctx context.Context, payloadCid cid.Cid, pieceCid *cid.Cid, + piece piecestore.PieceInfo, isUnsealed bool, client peer.ID) (retrievalmarket.Ask, error) { + return fve.Ask, nil +} + +func (fve *fakeValidationEnvironment) GetPiece(c cid.Cid, pieceCID *cid.Cid) (piecestore.PieceInfo, bool, error) { + return fve.PieceInfo, fve.IsUnsealedPiece, fve.GetPieceErr +} + +// CheckDealParams verifies the given deal params are acceptable +func (fve *fakeValidationEnvironment) CheckDealParams(ask retrievalmarket.Ask, pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64, unsealPrice abi.TokenAmount) error { + return fve.CheckDealParamsError +} + +// 
RunDealDecisioningLogic runs custom deal decision logic to decide if a deal is accepted, if present +func (fve *fakeValidationEnvironment) RunDealDecisioningLogic(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { + return fve.RunDealDecisioningLogicAccepted, fve.RunDealDecisioningLogicFailReason, fve.RunDealDecisioningLogicError +} + +// StateMachines returns the FSM Group to begin tracking with +func (fve *fakeValidationEnvironment) BeginTracking(pds retrievalmarket.ProviderDealState) error { + return fve.BeginTrackingError +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/revalidator.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/revalidator.go new file mode 100644 index 00000000000..3c5b56ecb01 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/revalidator.go @@ -0,0 +1,372 @@ +package requestvalidation + +import ( + "context" + "errors" + "sync" + + logging "github.com/ipfs/go-log/v2" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" +) + +var log = logging.Logger("markets-rtvl-reval") + +// RevalidatorEnvironment are the dependencies needed to +// build the logic of revalidation -- essentially, access to the node at statemachines +type RevalidatorEnvironment interface { + Node() rm.RetrievalProviderNode + SendEvent(dealID rm.ProviderDealIdentifier, evt rm.ProviderEvent, args ...interface{}) error + Get(dealID rm.ProviderDealIdentifier) (rm.ProviderDealState, error) +} + +type channelData struct { + dealID rm.ProviderDealIdentifier + totalSent uint64 + totalPaidFor uint64 + interval uint64 
+ pricePerByte abi.TokenAmount + reload bool + legacyProtocol bool +} + +// ProviderRevalidator defines data transfer revalidation logic in the context of +// a provider for a retrieval deal +type ProviderRevalidator struct { + env RevalidatorEnvironment + trackedChannelsLk sync.RWMutex + trackedChannels map[datatransfer.ChannelID]*channelData +} + +// NewProviderRevalidator returns a new instance of a ProviderRevalidator +func NewProviderRevalidator(env RevalidatorEnvironment) *ProviderRevalidator { + return &ProviderRevalidator{ + env: env, + trackedChannels: make(map[datatransfer.ChannelID]*channelData), + } +} + +// TrackChannel indicates a retrieval deal tracked by this provider. It associates +// a given channel ID with a retrieval deal, so that checks run for data sent +// on the channel +func (pr *ProviderRevalidator) TrackChannel(deal rm.ProviderDealState) { + if deal.ChannelID == nil { + return + } + + pr.trackedChannelsLk.Lock() + defer pr.trackedChannelsLk.Unlock() + pr.trackedChannels[*deal.ChannelID] = &channelData{ + dealID: deal.Identifier(), + } + pr.writeDealState(deal) +} + +// UntrackChannel indicates a retrieval deal is finish and no longer is tracked +// by this provider +func (pr *ProviderRevalidator) UntrackChannel(deal rm.ProviderDealState) { + // Sanity check + if deal.ChannelID == nil { + log.Errorf("cannot untrack deal %s: channel ID is nil", deal.ID) + return + } + + pr.trackedChannelsLk.Lock() + defer pr.trackedChannelsLk.Unlock() + delete(pr.trackedChannels, *deal.ChannelID) +} + +func (pr *ProviderRevalidator) loadDealState(channel *channelData) error { + if !channel.reload { + return nil + } + deal, err := pr.env.Get(channel.dealID) + if err != nil { + return err + } + pr.writeDealState(deal) + channel.reload = false + return nil +} + +func (pr *ProviderRevalidator) writeDealState(deal rm.ProviderDealState) { + channel := pr.trackedChannels[*deal.ChannelID] + channel.totalSent = deal.TotalSent + if !deal.PricePerByte.IsZero() { + 
channel.totalPaidFor = big.Div(big.Max(big.Sub(deal.FundsReceived, deal.UnsealPrice), big.Zero()), deal.PricePerByte).Uint64() + } + channel.interval = deal.CurrentInterval + channel.pricePerByte = deal.PricePerByte + channel.legacyProtocol = deal.LegacyProtocol +} + +// Revalidate revalidates a request with a new voucher +func (pr *ProviderRevalidator) Revalidate(channelID datatransfer.ChannelID, voucher datatransfer.Voucher) (datatransfer.VoucherResult, error) { + pr.trackedChannelsLk.RLock() + defer pr.trackedChannelsLk.RUnlock() + channel, ok := pr.trackedChannels[channelID] + if !ok { + return nil, nil + } + + // read payment, or fail + payment, ok := voucher.(*rm.DealPayment) + var legacyProtocol bool + if !ok { + legacyPayment, ok := voucher.(*migrations.DealPayment0) + if !ok { + return nil, errors.New("wrong voucher type") + } + newPayment := migrations.MigrateDealPayment0To1(*legacyPayment) + payment = &newPayment + legacyProtocol = true + } + + response, err := pr.processPayment(channel.dealID, payment) + if err == nil || err == datatransfer.ErrResume { + channel.reload = true + } + return finalResponse(response, legacyProtocol), err +} + +func (pr *ProviderRevalidator) processPayment(dealID rm.ProviderDealIdentifier, payment *rm.DealPayment) (*retrievalmarket.DealResponse, error) { + + tok, _, err := pr.env.Node().GetChainHead(context.TODO()) + if err != nil { + _ = pr.env.SendEvent(dealID, rm.ProviderEventSaveVoucherFailed, err) + return errorDealResponse(dealID, err), err + } + + deal, err := pr.env.Get(dealID) + if err != nil { + return errorDealResponse(dealID, err), err + } + + // Save voucher + received, err := pr.env.Node().SavePaymentVoucher(context.TODO(), payment.PaymentChannel, payment.PaymentVoucher, nil, big.Zero(), tok) + if err != nil { + _ = pr.env.SendEvent(dealID, rm.ProviderEventSaveVoucherFailed, err) + return errorDealResponse(dealID, err), err + } + + totalPaid := big.Add(deal.FundsReceived, received) + + // check if all payments 
are received to continue the deal, or send updated required payment + owed := paymentOwed(deal, totalPaid) + + log.Debugf("provider: owed %d: received voucher for %d, total received %d = received so far %d + newly received %d, total sent %d, unseal price %d, price per byte %d", + owed, payment.PaymentVoucher.Amount, totalPaid, deal.FundsReceived, received, deal.TotalSent, deal.UnsealPrice, deal.PricePerByte) + + if owed.GreaterThan(big.Zero()) { + log.Debugf("provider: owed %d: sending partial payment request", owed) + _ = pr.env.SendEvent(dealID, rm.ProviderEventPartialPaymentReceived, received) + return &rm.DealResponse{ + ID: deal.ID, + Status: deal.Status, + PaymentOwed: owed, + }, datatransfer.ErrPause + } + + // resume deal + _ = pr.env.SendEvent(dealID, rm.ProviderEventPaymentReceived, received) + + if deal.Status == rm.DealStatusFundsNeededLastPayment { + log.Debugf("provider: funds needed: last payment") + return &rm.DealResponse{ + ID: deal.ID, + Status: rm.DealStatusCompleted, + }, datatransfer.ErrResume + } + + // We shouldn't resume the data transfer if we haven't finished unsealing/reading the unsealed data into the + // local block-store. + if deal.Status == rm.DealStatusUnsealing || deal.Status == rm.DealStatusFundsNeededUnseal { + return nil, nil + } + + return nil, datatransfer.ErrResume +} + +func paymentOwed(deal rm.ProviderDealState, totalPaid big.Int) big.Int { + // Check if the payment covers unsealing + if totalPaid.LessThan(deal.UnsealPrice) { + log.Debugf("provider: total paid %d < unseal price %d", totalPaid, deal.UnsealPrice) + return big.Sub(deal.UnsealPrice, totalPaid) + } + + // Calculate how much payment has been made for transferred data + transferPayment := big.Sub(totalPaid, deal.UnsealPrice) + + // The provider sends data and the client sends payment for the data. + // The provider will send a limited amount of extra data before receiving + // payment. 
Given the current limit, check if the client has paid enough + // to unlock the next interval. + currentLimitLower := deal.IntervalLowerBound() + + log.Debugf("provider: total sent %d bytes, but require payment for interval lower bound %d bytes", + deal.TotalSent, currentLimitLower) + + // Calculate the minimum required payment + totalPaymentRequired := big.Mul(big.NewInt(int64(currentLimitLower)), deal.PricePerByte) + + // Calculate payment owed + owed := big.Sub(totalPaymentRequired, transferPayment) + log.Debugf("provider: payment owed %d = payment required %d - transfer paid %d", + owed, totalPaymentRequired, transferPayment) + + return owed +} + +func errorDealResponse(dealID rm.ProviderDealIdentifier, err error) *rm.DealResponse { + return &rm.DealResponse{ + ID: dealID.DealID, + Message: err.Error(), + Status: rm.DealStatusErrored, + } +} + +// OnPullDataSent is called on the responder side when more bytes are sent +// for a given pull request. It should return a VoucherResult + ErrPause to +// request revalidation or nil to continue uninterrupted, +// other errors will terminate the request +func (pr *ProviderRevalidator) OnPullDataSent(chid datatransfer.ChannelID, additionalBytesSent uint64) (bool, datatransfer.VoucherResult, error) { + pr.trackedChannelsLk.RLock() + defer pr.trackedChannelsLk.RUnlock() + channel, ok := pr.trackedChannels[chid] + if !ok { + return false, nil, nil + } + + err := pr.loadDealState(channel) + if err != nil { + return true, nil, err + } + + // Calculate how much data has been sent in total + channel.totalSent += additionalBytesSent + if channel.pricePerByte.IsZero() || channel.totalSent < channel.interval { + if !channel.pricePerByte.IsZero() { + log.Debugf("provider: total sent %d < interval %d, sending block", channel.totalSent, channel.interval) + } + return true, nil, pr.env.SendEvent(channel.dealID, rm.ProviderEventBlockSent, channel.totalSent) + } + + // Calculate the payment owed + paymentOwed := 
big.Mul(abi.NewTokenAmount(int64(channel.totalSent-channel.totalPaidFor)), channel.pricePerByte) + log.Debugf("provider: owed %d = (total sent %d - paid for %d) * price per byte %d: sending payment request", + paymentOwed, channel.totalSent, channel.totalPaidFor, channel.pricePerByte) + + // Request payment + err = pr.env.SendEvent(channel.dealID, rm.ProviderEventPaymentRequested, channel.totalSent) + if err != nil { + return true, nil, err + } + return true, finalResponse(&rm.DealResponse{ + ID: channel.dealID.DealID, + Status: rm.DealStatusFundsNeeded, + PaymentOwed: paymentOwed, + }, channel.legacyProtocol), datatransfer.ErrPause +} + +// OnPushDataReceived is called on the responder side when more bytes are received +// for a given push request. It should return a VoucherResult + ErrPause to +// request revalidation or nil to continue uninterrupted, +// other errors will terminate the request +func (pr *ProviderRevalidator) OnPushDataReceived(chid datatransfer.ChannelID, additionalBytesReceived uint64) (bool, datatransfer.VoucherResult, error) { + return false, nil, nil +} + +// OnComplete is called to make a final request for revalidation -- often for the +// purpose of settlement. 
+// if VoucherResult is non nil, the request will enter a settlement phase awaiting +// a final update +func (pr *ProviderRevalidator) OnComplete(chid datatransfer.ChannelID) (bool, datatransfer.VoucherResult, error) { + pr.trackedChannelsLk.RLock() + defer pr.trackedChannelsLk.RUnlock() + channel, ok := pr.trackedChannels[chid] + if !ok { + return false, nil, nil + } + + err := pr.loadDealState(channel) + if err != nil { + return true, nil, err + } + + err = pr.env.SendEvent(channel.dealID, rm.ProviderEventBlocksCompleted) + if err != nil { + return true, nil, err + } + + // Calculate how much payment is owed + paymentOwed := big.Mul(abi.NewTokenAmount(int64(channel.totalSent-channel.totalPaidFor)), channel.pricePerByte) + if paymentOwed.Equals(big.Zero()) { + return true, finalResponse(&rm.DealResponse{ + ID: channel.dealID.DealID, + Status: rm.DealStatusCompleted, + }, channel.legacyProtocol), nil + } + + // Send a request for payment + log.Debugf("provider: last payment owed %d = (total sent %d - paid for %d) * price per byte %d", + paymentOwed, channel.totalSent, channel.totalPaidFor, channel.pricePerByte) + err = pr.env.SendEvent(channel.dealID, rm.ProviderEventPaymentRequested, channel.totalSent) + if err != nil { + return true, nil, err + } + return true, finalResponse(&rm.DealResponse{ + ID: channel.dealID.DealID, + Status: rm.DealStatusFundsNeededLastPayment, + PaymentOwed: paymentOwed, + }, channel.legacyProtocol), datatransfer.ErrPause +} + +func finalResponse(response *rm.DealResponse, legacyProtocol bool) datatransfer.Voucher { + if response == nil { + return nil + } + if legacyProtocol { + downgradedResponse := migrations.DealResponse0{ + Status: response.Status, + ID: response.ID, + Message: response.Message, + PaymentOwed: response.PaymentOwed, + } + return &downgradedResponse + } + return response +} + +type legacyRevalidator struct { + providerRevalidator *ProviderRevalidator +} + +func (lrv *legacyRevalidator) Revalidate(channelID 
datatransfer.ChannelID, voucher datatransfer.Voucher) (datatransfer.VoucherResult, error) { + return lrv.providerRevalidator.Revalidate(channelID, voucher) +} + +func (lrv *legacyRevalidator) OnPullDataSent(chid datatransfer.ChannelID, additionalBytesSent uint64) (bool, datatransfer.VoucherResult, error) { + return false, nil, nil +} + +func (lrv *legacyRevalidator) OnPushDataReceived(chid datatransfer.ChannelID, additionalBytesReceived uint64) (bool, datatransfer.VoucherResult, error) { + return false, nil, nil +} + +func (lrv *legacyRevalidator) OnComplete(chid datatransfer.ChannelID) (bool, datatransfer.VoucherResult, error) { + return false, nil, nil +} + +// NewLegacyRevalidator adds a revalidator that will capture revalidation requests for the legacy protocol but +// won't double count data being sent +// TODO: the data transfer revalidator registration needs to be able to take multiple types to avoid double counting +// for data being sent. +func NewLegacyRevalidator(providerRevalidator *ProviderRevalidator) datatransfer.Revalidator { + return &legacyRevalidator{providerRevalidator: providerRevalidator} +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/revalidator_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/revalidator_test.go new file mode 100644 index 00000000000..ffe3fccdf3d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/requestvalidation/revalidator_test.go @@ -0,0 +1,577 @@ +package requestvalidation_test + +import ( + "errors" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" + 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestOnPushDataReceived(t *testing.T) { + fre := &fakeRevalidatorEnvironment{} + revalidator := requestvalidation.NewProviderRevalidator(fre) + channelID := shared_testutil.MakeTestChannelID() + handled, voucherResult, err := revalidator.OnPushDataReceived(channelID, rand.Uint64()) + require.False(t, handled) + require.NoError(t, err) + require.Nil(t, voucherResult) +} + +func TestOnPullDataSent(t *testing.T) { + deal := *makeDealState(rm.DealStatusOngoing) + dealZeroPricePerByte := deal + dealZeroPricePerByte.PricePerByte = big.Zero() + legacyDeal := deal + legacyDeal.LegacyProtocol = true + testCases := map[string]struct { + noSend bool + expectedID rm.ProviderDealIdentifier + expectedEvent rm.ProviderEvent + expectedArgs []interface{} + deal rm.ProviderDealState + channelID datatransfer.ChannelID + dataAmount uint64 + expectedHandled bool + expectedResult datatransfer.VoucherResult + expectedError error + }{ + "not tracked": { + deal: deal, + channelID: shared_testutil.MakeTestChannelID(), + noSend: true, + }, + "record block": { + deal: deal, + channelID: *deal.ChannelID, + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventBlockSent, + expectedArgs: []interface{}{deal.TotalSent + uint64(500)}, + expectedHandled: true, + dataAmount: uint64(500), + }, + "record block zero price per byte": { + deal: dealZeroPricePerByte, + channelID: *dealZeroPricePerByte.ChannelID, + expectedID: dealZeroPricePerByte.Identifier(), + expectedEvent: rm.ProviderEventBlockSent, + expectedArgs: []interface{}{dealZeroPricePerByte.TotalSent + uint64(500)}, + expectedHandled: true, + dataAmount: uint64(500), + }, + "request payment": { + deal: deal, + channelID: 
*deal.ChannelID, + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventPaymentRequested, + expectedArgs: []interface{}{deal.TotalSent + defaultCurrentInterval}, + dataAmount: defaultCurrentInterval, + expectedError: datatransfer.ErrPause, + expectedResult: &rm.DealResponse{ + ID: deal.ID, + Status: rm.DealStatusFundsNeeded, + PaymentOwed: big.Mul(abi.NewTokenAmount(int64(defaultCurrentInterval)), defaultPricePerByte), + }, + expectedHandled: true, + }, + "request payment, legacy": { + deal: legacyDeal, + channelID: *legacyDeal.ChannelID, + expectedID: legacyDeal.Identifier(), + expectedEvent: rm.ProviderEventPaymentRequested, + expectedArgs: []interface{}{legacyDeal.TotalSent + defaultCurrentInterval}, + dataAmount: defaultCurrentInterval, + expectedError: datatransfer.ErrPause, + expectedResult: &migrations.DealResponse0{ + ID: legacyDeal.ID, + Status: rm.DealStatusFundsNeeded, + PaymentOwed: big.Mul(abi.NewTokenAmount(int64(defaultCurrentInterval)), defaultPricePerByte), + }, + expectedHandled: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + tn := testnodes.NewTestRetrievalProviderNode() + fre := &fakeRevalidatorEnvironment{ + node: tn, + returnedDeal: data.deal, + getError: nil, + } + revalidator := requestvalidation.NewProviderRevalidator(fre) + revalidator.TrackChannel(data.deal) + handled, voucherResult, err := revalidator.OnPullDataSent(data.channelID, data.dataAmount) + require.Equal(t, data.expectedHandled, handled) + require.Equal(t, data.expectedResult, voucherResult) + if data.expectedError == nil { + require.NoError(t, err) + } else { + require.Error(t, err) + require.EqualError(t, err, data.expectedError.Error()) + } + if !data.noSend { + require.Len(t, fre.sentEvents, 1) + event := fre.sentEvents[0] + require.Equal(t, data.expectedID, event.ID) + require.Equal(t, data.expectedEvent, event.Event) + require.Equal(t, data.expectedArgs, event.Args) + } else { + require.Len(t, fre.sentEvents, 
0) + } + }) + } +} + +func TestOnComplete(t *testing.T) { + deal := *makeDealState(rm.DealStatusOngoing) + dealZeroPricePerByte := deal + dealZeroPricePerByte.PricePerByte = big.Zero() + legacyDeal := deal + legacyDeal.LegacyProtocol = true + channelID := *deal.ChannelID + testCases := map[string]struct { + expectedEvents []eventSent + deal rm.ProviderDealState + channelID datatransfer.ChannelID + expectedResult datatransfer.VoucherResult + expectedError error + unpaidAmount uint64 + }{ + "unpaid money": { + unpaidAmount: uint64(500), + expectedEvents: []eventSent{ + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlockSent, + Args: []interface{}{deal.TotalSent + 500}, + }, + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlocksCompleted, + }, + { + ID: deal.Identifier(), + Event: rm.ProviderEventPaymentRequested, + Args: []interface{}{deal.TotalSent + 500}, + }, + }, + expectedError: datatransfer.ErrPause, + expectedResult: &rm.DealResponse{ + ID: deal.ID, + Status: rm.DealStatusFundsNeededLastPayment, + PaymentOwed: big.Mul(big.NewIntUnsigned(500), defaultPricePerByte), + }, + deal: deal, + channelID: channelID, + }, + "unpaid money, legacyDeal": { + unpaidAmount: uint64(500), + expectedEvents: []eventSent{ + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlockSent, + Args: []interface{}{deal.TotalSent + 500}, + }, + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlocksCompleted, + }, + { + ID: deal.Identifier(), + Event: rm.ProviderEventPaymentRequested, + Args: []interface{}{deal.TotalSent + 500}, + }, + }, + expectedError: datatransfer.ErrPause, + expectedResult: &migrations.DealResponse0{ + ID: deal.ID, + Status: rm.DealStatusFundsNeededLastPayment, + PaymentOwed: big.Mul(big.NewIntUnsigned(500), defaultPricePerByte), + }, + deal: legacyDeal, + channelID: channelID, + }, + "all funds paid": { + unpaidAmount: uint64(0), + expectedEvents: []eventSent{ + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlockSent, + Args: 
[]interface{}{deal.TotalSent}, + }, + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlocksCompleted, + }, + }, + expectedResult: &rm.DealResponse{ + ID: deal.ID, + Status: rm.DealStatusCompleted, + }, + deal: deal, + channelID: channelID, + }, + "all funds paid zero price per byte": { + unpaidAmount: uint64(0), + expectedEvents: []eventSent{ + { + ID: dealZeroPricePerByte.Identifier(), + Event: rm.ProviderEventBlockSent, + Args: []interface{}{dealZeroPricePerByte.TotalSent}, + }, + { + ID: dealZeroPricePerByte.Identifier(), + Event: rm.ProviderEventBlocksCompleted, + }, + }, + expectedResult: &rm.DealResponse{ + ID: dealZeroPricePerByte.ID, + Status: rm.DealStatusCompleted, + }, + deal: dealZeroPricePerByte, + channelID: channelID, + }, + "all funds paid, legacyDeal": { + unpaidAmount: uint64(0), + expectedEvents: []eventSent{ + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlockSent, + Args: []interface{}{deal.TotalSent}, + }, + { + ID: deal.Identifier(), + Event: rm.ProviderEventBlocksCompleted, + }, + }, + expectedResult: &migrations.DealResponse0{ + ID: deal.ID, + Status: rm.DealStatusCompleted, + }, + deal: legacyDeal, + channelID: channelID, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + tn := testnodes.NewTestRetrievalProviderNode() + fre := &fakeRevalidatorEnvironment{ + node: tn, + returnedDeal: data.deal, + getError: nil, + } + revalidator := requestvalidation.NewProviderRevalidator(fre) + revalidator.TrackChannel(data.deal) + _, _, err := revalidator.OnPullDataSent(data.channelID, data.unpaidAmount) + require.NoError(t, err) + handled, voucherResult, err := revalidator.OnComplete(data.channelID) + require.True(t, handled) + require.Equal(t, data.expectedResult, voucherResult) + if data.expectedError != nil { + require.EqualError(t, err, data.expectedError.Error()) + } else { + require.NoError(t, err) + } + require.Equal(t, data.expectedEvents, fre.sentEvents) + }) + } +} + +func TestRevalidate(t 
*testing.T) { + payCh := address.TestAddress + + voucher := shared_testutil.MakeTestSignedVoucher() + voucher.Amount = big.Add(defaultFundsReceived, defaultPaymentPerInterval) + + smallerPaymentAmt := abi.NewTokenAmount(int64(defaultPaymentInterval / 2)) + smallerVoucher := shared_testutil.MakeTestSignedVoucher() + smallerVoucher.Amount = big.Add(defaultFundsReceived, smallerPaymentAmt) + + deal := *makeDealState(rm.DealStatusFundsNeeded) + deal.TotalSent = defaultTotalSent + defaultPaymentInterval + defaultPaymentInterval/2 + channelID := *deal.ChannelID + payment := &retrievalmarket.DealPayment{ + ID: deal.ID, + PaymentChannel: payCh, + PaymentVoucher: voucher, + } + smallerPayment := &retrievalmarket.DealPayment{ + ID: deal.ID, + PaymentChannel: payCh, + PaymentVoucher: smallerVoucher, + } + legacyPayment := &migrations.DealPayment0{ + ID: deal.ID, + PaymentChannel: payCh, + PaymentVoucher: voucher, + } + legacySmallerPayment := &migrations.DealPayment0{ + ID: deal.ID, + PaymentChannel: payCh, + PaymentVoucher: smallerVoucher, + } + lastPaymentDeal := deal + lastPaymentDeal.Status = rm.DealStatusFundsNeededLastPayment + testCases := map[string]struct { + configureTestNode func(tn *testnodes.TestRetrievalProviderNode) + noSend bool + expectedID rm.ProviderDealIdentifier + expectedEvent rm.ProviderEvent + expectedArgs []interface{} + getError error + deal rm.ProviderDealState + channelID datatransfer.ChannelID + voucher datatransfer.Voucher + expectedResult datatransfer.VoucherResult + expectedError error + }{ + "not tracked": { + deal: deal, + channelID: shared_testutil.MakeTestChannelID(), + noSend: true, + }, + "not a payment voucher": { + deal: deal, + channelID: channelID, + noSend: true, + expectedError: errors.New("wrong voucher type"), + }, + "error getting chain head": { + configureTestNode: func(tn *testnodes.TestRetrievalProviderNode) { + tn.ChainHeadError = errors.New("something went wrong") + }, + deal: deal, + channelID: channelID, + voucher: 
payment, + expectedError: errors.New("something went wrong"), + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventSaveVoucherFailed, + expectedArgs: []interface{}{errors.New("something went wrong")}, + expectedResult: &rm.DealResponse{ + ID: deal.ID, + Status: rm.DealStatusErrored, + Message: "something went wrong", + }, + }, + "error getting chain head, legacyPayment": { + configureTestNode: func(tn *testnodes.TestRetrievalProviderNode) { + tn.ChainHeadError = errors.New("something went wrong") + }, + deal: deal, + channelID: channelID, + voucher: legacyPayment, + expectedError: errors.New("something went wrong"), + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventSaveVoucherFailed, + expectedArgs: []interface{}{errors.New("something went wrong")}, + expectedResult: &migrations.DealResponse0{ + ID: deal.ID, + Status: rm.DealStatusErrored, + Message: "something went wrong", + }, + }, + "payment voucher error": { + configureTestNode: func(tn *testnodes.TestRetrievalProviderNode) { + _ = tn.ExpectVoucher(payCh, voucher, nil, voucher.Amount, abi.NewTokenAmount(0), errors.New("your money's no good here")) + }, + deal: deal, + channelID: channelID, + voucher: payment, + expectedError: errors.New("your money's no good here"), + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventSaveVoucherFailed, + expectedArgs: []interface{}{errors.New("your money's no good here")}, + expectedResult: &rm.DealResponse{ + ID: deal.ID, + Status: rm.DealStatusErrored, + Message: "your money's no good here", + }, + }, + "payment voucher error, legacy payment": { + configureTestNode: func(tn *testnodes.TestRetrievalProviderNode) { + _ = tn.ExpectVoucher(payCh, voucher, nil, voucher.Amount, abi.NewTokenAmount(0), errors.New("your money's no good here")) + }, + deal: deal, + channelID: channelID, + voucher: legacyPayment, + expectedError: errors.New("your money's no good here"), + expectedID: deal.Identifier(), + expectedEvent: 
rm.ProviderEventSaveVoucherFailed, + expectedArgs: []interface{}{errors.New("your money's no good here")}, + expectedResult: &migrations.DealResponse0{ + ID: deal.ID, + Status: rm.DealStatusErrored, + Message: "your money's no good here", + }, + }, + "not enough funds send": { + deal: deal, + channelID: channelID, + voucher: smallerPayment, + expectedError: datatransfer.ErrPause, + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventPartialPaymentReceived, + expectedArgs: []interface{}{smallerPaymentAmt}, + expectedResult: &rm.DealResponse{ + ID: deal.ID, + Status: deal.Status, + PaymentOwed: big.Sub(defaultPaymentPerInterval, smallerPaymentAmt), + }, + }, + "not enough funds send, legacyPayment": { + deal: deal, + channelID: channelID, + voucher: legacySmallerPayment, + expectedError: datatransfer.ErrPause, + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventPartialPaymentReceived, + expectedArgs: []interface{}{smallerPaymentAmt}, + expectedResult: &migrations.DealResponse0{ + ID: deal.ID, + Status: deal.Status, + PaymentOwed: big.Sub(defaultPaymentPerInterval, smallerPaymentAmt), + }, + }, + "it works": { + deal: deal, + channelID: channelID, + voucher: payment, + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventPaymentReceived, + expectedArgs: []interface{}{defaultPaymentPerInterval}, + expectedError: datatransfer.ErrResume, + }, + + "it completes": { + deal: lastPaymentDeal, + channelID: channelID, + voucher: payment, + expectedID: deal.Identifier(), + expectedEvent: rm.ProviderEventPaymentReceived, + expectedArgs: []interface{}{defaultPaymentPerInterval}, + expectedError: datatransfer.ErrResume, + expectedResult: &rm.DealResponse{ + ID: deal.ID, + Status: rm.DealStatusCompleted, + }, + }, + "it completes, legacy payment": { + deal: lastPaymentDeal, + channelID: channelID, + voucher: legacyPayment, + expectedID: deal.Identifier(), + expectedError: datatransfer.ErrResume, + expectedEvent: rm.ProviderEventPaymentReceived, 
+ expectedArgs: []interface{}{defaultPaymentPerInterval}, + expectedResult: &migrations.DealResponse0{ + ID: deal.ID, + Status: rm.DealStatusCompleted, + }, + }, + "voucher already saved": { + deal: deal, + channelID: channelID, + voucher: payment, + expectedID: deal.Identifier(), + expectedError: datatransfer.ErrResume, + expectedEvent: rm.ProviderEventPaymentReceived, + expectedArgs: []interface{}{defaultPaymentPerInterval}, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + tn := testnodes.NewTestRetrievalProviderNode() + tn.AddReceivedVoucher(deal.FundsReceived) + if data.configureTestNode != nil { + data.configureTestNode(tn) + } + fre := &fakeRevalidatorEnvironment{ + node: tn, + returnedDeal: data.deal, + getError: data.getError, + } + revalidator := requestvalidation.NewProviderRevalidator(fre) + revalidator.TrackChannel(data.deal) + voucherResult, err := revalidator.Revalidate(data.channelID, data.voucher) + require.Equal(t, data.expectedResult, voucherResult) + if data.expectedError == nil { + require.NoError(t, err) + } else { + require.Error(t, err) + require.EqualError(t, err, data.expectedError.Error()) + } + if !data.noSend { + require.Len(t, fre.sentEvents, 1) + event := fre.sentEvents[0] + require.Equal(t, data.expectedID, event.ID) + require.Equal(t, data.expectedEvent, event.Event) + require.Equal(t, data.expectedArgs, event.Args) + } else { + require.Len(t, fre.sentEvents, 0) + } + }) + } +} + +type eventSent struct { + ID rm.ProviderDealIdentifier + Event rm.ProviderEvent + Args []interface{} +} +type fakeRevalidatorEnvironment struct { + node rm.RetrievalProviderNode + sentEvents []eventSent + sendEventError error + returnedDeal rm.ProviderDealState + getError error +} + +func (fre *fakeRevalidatorEnvironment) Node() rm.RetrievalProviderNode { + return fre.node +} + +func (fre *fakeRevalidatorEnvironment) SendEvent(dealID rm.ProviderDealIdentifier, evt rm.ProviderEvent, args ...interface{}) error { + 
fre.sentEvents = append(fre.sentEvents, eventSent{dealID, evt, args}) + return fre.sendEventError +} + +func (fre *fakeRevalidatorEnvironment) Get(dealID rm.ProviderDealIdentifier) (rm.ProviderDealState, error) { + return fre.returnedDeal, fre.getError +} + +var dealID = retrievalmarket.DealID(10) +var defaultCurrentInterval = uint64(3000) +var defaultPaymentInterval = uint64(1000) +var defaultIntervalIncrease = uint64(0) +var defaultPricePerByte = abi.NewTokenAmount(1000) +var defaultPaymentPerInterval = big.Mul(defaultPricePerByte, abi.NewTokenAmount(int64(defaultPaymentInterval))) +var defaultTotalSent = defaultPaymentInterval +var defaultFundsReceived = big.Mul(defaultPricePerByte, abi.NewTokenAmount(int64(defaultTotalSent))) + +func makeDealState(status retrievalmarket.DealStatus) *retrievalmarket.ProviderDealState { + channelID := shared_testutil.MakeTestChannelID() + return &retrievalmarket.ProviderDealState{ + Status: status, + TotalSent: defaultTotalSent, + CurrentInterval: defaultCurrentInterval, + FundsReceived: defaultFundsReceived, + ChannelID: &channelID, + Receiver: channelID.Initiator, + DealProposal: retrievalmarket.DealProposal{ + ID: dealID, + Params: retrievalmarket.NewParamsV0(defaultPricePerByte, defaultPaymentInterval, defaultIntervalIncrease), + }, + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/doc.go new file mode 100644 index 00000000000..2ce77caabdc --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/doc.go @@ -0,0 +1,4 @@ +// Package testnodes contains stubbed implementations of the RetrievalProviderNode +// and RetrievalClientNode interface to simulate communications with a filecoin +// node +package testnodes diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/sectoraccessor.go 
b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/sectoraccessor.go new file mode 100644 index 00000000000..5b8661e618b --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/sectoraccessor.go @@ -0,0 +1,109 @@ +package testnodes + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +type sectorKey struct { + sectorID abi.SectorNumber + offset abi.UnpaddedPieceSize + length abi.UnpaddedPieceSize +} + +// TestSectorAccessor is a mock implementation of the SectorAccessor +type TestSectorAccessor struct { + lk sync.Mutex + sectorStubs map[sectorKey][]byte + expectations map[sectorKey]struct{} + received map[sectorKey]struct{} + unsealed map[sectorKey]struct{} + unsealPaused chan struct{} +} + +var _ retrievalmarket.SectorAccessor = &TestSectorAccessor{} + +// NewTestSectorAccessor instantiates a new TestSectorAccessor +func NewTestSectorAccessor() *TestSectorAccessor { + return &TestSectorAccessor{ + sectorStubs: make(map[sectorKey][]byte), + expectations: make(map[sectorKey]struct{}), + received: make(map[sectorKey]struct{}), + unsealed: make(map[sectorKey]struct{}), + } +} + +func (trpn *TestSectorAccessor) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { + _, ok := trpn.unsealed[sectorKey{sectorID, offset, length}] + return ok, nil +} + +func (trpn *TestSectorAccessor) MarkUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) { + trpn.unsealed[sectorKey{sectorID, offset, length}] = struct{}{} +} + +// StubUnseal stubs a response to attempting to unseal a sector with the given paramters +func (trpn *TestSectorAccessor) StubUnseal(sectorID abi.SectorNumber, offset, length 
abi.UnpaddedPieceSize, data []byte) { + trpn.sectorStubs[sectorKey{sectorID, offset, length}] = data +} + +// ExpectFailedUnseal indicates an expectation that a call will be made to unseal +// a sector with the given params and should fail +func (trpn *TestSectorAccessor) ExpectFailedUnseal(sectorID abi.SectorNumber, offset, length abi.UnpaddedPieceSize) { + trpn.expectations[sectorKey{sectorID, offset, length}] = struct{}{} +} + +// ExpectUnseal indicates an expectation that a call will be made to unseal +// a sector with the given params and should return the given data +func (trpn *TestSectorAccessor) ExpectUnseal(sectorID abi.SectorNumber, offset, length abi.UnpaddedPieceSize, data []byte) { + trpn.expectations[sectorKey{sectorID, offset, length}] = struct{}{} + trpn.StubUnseal(sectorID, offset, length, data) +} + +func (trpn *TestSectorAccessor) PauseUnseal() { + trpn.unsealPaused = make(chan struct{}) +} + +func (trpn *TestSectorAccessor) FinishUnseal() { + close(trpn.unsealPaused) +} + +// UnsealSector simulates unsealing a sector by returning a stubbed response +// or erroring +func (trpn *TestSectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { + trpn.lk.Lock() + defer trpn.lk.Unlock() + + if trpn.unsealPaused != nil { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-trpn.unsealPaused: + } + } + + trpn.received[sectorKey{sectorID, offset, length}] = struct{}{} + data, ok := trpn.sectorStubs[sectorKey{sectorID, offset, length}] + if !ok { + return nil, errors.New("Could not unseal") + } + + return ioutil.NopCloser(bytes.NewReader(data)), nil +} + +// VerifyExpectations verifies that all expected calls were made and no other calls +// were made +func (trpn *TestSectorAccessor) VerifyExpectations(t *testing.T) { + require.Equal(t, trpn.expectations, trpn.received) +} diff --git 
a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/test_retrieval_client_node.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/test_retrieval_client_node.go new file mode 100644 index 00000000000..7d13d4da354 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/test_retrieval_client_node.go @@ -0,0 +1,207 @@ +package testnodes + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/ipfs/go-cid" + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +// TestRetrievalClientNode is a node adapter for a retrieval client whose responses +// are stubbed +type TestRetrievalClientNode struct { + addFundsOnly bool // set this to true to test adding funds to an existing payment channel + payCh address.Address + payChErr error + createPaychMsgCID, addFundsMsgCID cid.Cid + lane uint64 + laneError error + voucher *paychtypes.SignedVoucher + voucherError, waitErr error + channelAvailableFunds retrievalmarket.ChannelAvailableFunds + checkAvailableFundsErr error + fundsAdded abi.TokenAmount + intergrationTest bool + knownAddreses map[retrievalmarket.RetrievalPeer][]ma.Multiaddr + receivedKnownAddresses map[retrievalmarket.RetrievalPeer]struct{} + expectedKnownAddresses map[retrievalmarket.RetrievalPeer]struct{} + allocateLaneRecorder func(address.Address) + createPaymentVoucherRecorder func(voucher *paychtypes.SignedVoucher) + getCreatePaymentChannelRecorder func(address.Address, address.Address, abi.TokenAmount) +} + +// TestRetrievalClientNodeParams are 
parameters for initializing a TestRetrievalClientNode +type TestRetrievalClientNodeParams struct { + PayCh address.Address + PayChErr error + CreatePaychCID, AddFundsCID cid.Cid + Lane uint64 + LaneError error + Voucher *paychtypes.SignedVoucher + VoucherError error + AllocateLaneRecorder func(address.Address) + PaymentVoucherRecorder func(voucher *paychtypes.SignedVoucher) + PaymentChannelRecorder func(address.Address, address.Address, abi.TokenAmount) + AddFundsOnly bool + WaitForReadyErr error + ChannelAvailableFunds retrievalmarket.ChannelAvailableFunds + CheckAvailableFundsErr error + IntegrationTest bool +} + +var _ retrievalmarket.RetrievalClientNode = &TestRetrievalClientNode{} + +// NewTestRetrievalClientNode initializes a new TestRetrievalClientNode based on the given params +func NewTestRetrievalClientNode(params TestRetrievalClientNodeParams) *TestRetrievalClientNode { + return &TestRetrievalClientNode{ + addFundsOnly: params.AddFundsOnly, + payCh: params.PayCh, + payChErr: params.PayChErr, + waitErr: params.WaitForReadyErr, + lane: params.Lane, + laneError: params.LaneError, + voucher: params.Voucher, + voucherError: params.VoucherError, + allocateLaneRecorder: params.AllocateLaneRecorder, + createPaymentVoucherRecorder: params.PaymentVoucherRecorder, + getCreatePaymentChannelRecorder: params.PaymentChannelRecorder, + createPaychMsgCID: params.CreatePaychCID, + addFundsMsgCID: params.AddFundsCID, + channelAvailableFunds: addZeroesToAvailableFunds(params.ChannelAvailableFunds), + checkAvailableFundsErr: params.CheckAvailableFundsErr, + intergrationTest: params.IntegrationTest, + knownAddreses: map[retrievalmarket.RetrievalPeer][]ma.Multiaddr{}, + expectedKnownAddresses: map[retrievalmarket.RetrievalPeer]struct{}{}, + receivedKnownAddresses: map[retrievalmarket.RetrievalPeer]struct{}{}, + } +} + +// GetOrCreatePaymentChannel returns a mocked payment channel +func (trcn *TestRetrievalClientNode) GetOrCreatePaymentChannel(ctx context.Context, clientAddress 
address.Address, minerAddress address.Address, clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) { + if trcn.getCreatePaymentChannelRecorder != nil { + trcn.getCreatePaymentChannelRecorder(clientAddress, minerAddress, clientFundsAvailable) + } + var payCh address.Address + msgCID := trcn.createPaychMsgCID + if trcn.addFundsOnly { + payCh = trcn.payCh + msgCID = trcn.addFundsMsgCID + } + trcn.fundsAdded = clientFundsAvailable + return payCh, msgCID, trcn.payChErr +} + +// AllocateLane creates a mock lane on a payment channel +func (trcn *TestRetrievalClientNode) AllocateLane(ctx context.Context, paymentChannel address.Address) (uint64, error) { + if trcn.allocateLaneRecorder != nil { + trcn.allocateLaneRecorder(paymentChannel) + } + return trcn.lane, trcn.laneError +} + +// CreatePaymentVoucher creates a mock payment voucher based on a channel and lane +func (trcn *TestRetrievalClientNode) CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, amount abi.TokenAmount, lane uint64, tok shared.TipSetToken) (*paychtypes.SignedVoucher, error) { + if trcn.createPaymentVoucherRecorder != nil { + trcn.createPaymentVoucherRecorder(trcn.voucher) + } + if trcn.intergrationTest && amount.GreaterThan(trcn.channelAvailableFunds.ConfirmedAmt) { + return nil, retrievalmarket.NewShortfallError(big.Sub(amount, trcn.channelAvailableFunds.ConfirmedAmt)) + } + if trcn.voucherError != nil { + return nil, trcn.voucherError + } + voucher := *trcn.voucher + voucher.Amount = amount + return &voucher, nil +} + +// GetChainHead returns a mock value for the chain head +func (trcn *TestRetrievalClientNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { + return shared.TipSetToken{}, 0, nil +} + +// WaitForPaymentChannelReady simulates waiting for a payment channel to finish adding funds +func (trcn *TestRetrievalClientNode) WaitForPaymentChannelReady(ctx context.Context, messageCID cid.Cid) 
(address.Address, error) { + if messageCID.Equals(trcn.createPaychMsgCID) && !trcn.addFundsOnly { + if trcn.intergrationTest { + trcn.channelAvailableFunds.ConfirmedAmt = big.Add(trcn.channelAvailableFunds.ConfirmedAmt, trcn.fundsAdded) + } + return trcn.payCh, trcn.waitErr + } + if messageCID.Equals(trcn.addFundsMsgCID) && trcn.addFundsOnly { + if trcn.intergrationTest { + trcn.channelAvailableFunds.ConfirmedAmt = big.Add(trcn.channelAvailableFunds.ConfirmedAmt, trcn.fundsAdded) + } + return trcn.payCh, trcn.waitErr + } + if trcn.channelAvailableFunds.PendingWaitSentinel != nil && messageCID.Equals(*trcn.channelAvailableFunds.PendingWaitSentinel) { + if trcn.intergrationTest { + trcn.channelAvailableFunds.ConfirmedAmt = big.Add(trcn.channelAvailableFunds.ConfirmedAmt, trcn.channelAvailableFunds.PendingAmt) + trcn.channelAvailableFunds.PendingAmt = trcn.channelAvailableFunds.QueuedAmt + trcn.channelAvailableFunds.PendingWaitSentinel = &shared_testutil.GenerateCids(1)[0] + } + return trcn.payCh, trcn.waitErr + } + return address.Undef, fmt.Errorf("expected messageCID: %s does not match actual: %s", trcn.addFundsMsgCID, messageCID) +} + +// ExpectKnownAddresses stubs a return for a look up of known addresses for the given retrieval peer +// and the fact that it was looked up is verified with VerifyExpectations +func (trcn *TestRetrievalClientNode) ExpectKnownAddresses(p retrievalmarket.RetrievalPeer, maddrs []ma.Multiaddr) { + trcn.expectedKnownAddresses[p] = struct{}{} + trcn.knownAddreses[p] = maddrs +} + +// GetKnownAddresses gets any on known multiaddrs for a given address, so we can add to the peer store +func (trcn *TestRetrievalClientNode) GetKnownAddresses(ctx context.Context, p retrievalmarket.RetrievalPeer, tok shared.TipSetToken) ([]ma.Multiaddr, error) { + trcn.receivedKnownAddresses[p] = struct{}{} + addrs, ok := trcn.knownAddreses[p] + if !ok { + return nil, errors.New("Provider not found") + } + return addrs, nil +} + +// ResetChannelAvailableFunds is 
a way to manually change the funds in the payment channel +func (trcn *TestRetrievalClientNode) ResetChannelAvailableFunds(channelAvailableFunds retrievalmarket.ChannelAvailableFunds) { + trcn.channelAvailableFunds = addZeroesToAvailableFunds(channelAvailableFunds) +} + +// VerifyExpectations verifies that all expected known addresses were looked up +func (trcn *TestRetrievalClientNode) VerifyExpectations(t *testing.T) { + require.Equal(t, trcn.expectedKnownAddresses, trcn.receivedKnownAddresses) +} + +// CheckAvailableFunds returns the amount of available funds in a payment channel +func (trcn *TestRetrievalClientNode) CheckAvailableFunds(ctx context.Context, payCh address.Address) (retrievalmarket.ChannelAvailableFunds, error) { + return trcn.channelAvailableFunds, trcn.checkAvailableFundsErr +} + +func addZeroesToAvailableFunds(channelAvailableFunds retrievalmarket.ChannelAvailableFunds) retrievalmarket.ChannelAvailableFunds { + if channelAvailableFunds.ConfirmedAmt.Nil() { + channelAvailableFunds.ConfirmedAmt = big.Zero() + } + if channelAvailableFunds.PendingAmt.Nil() { + channelAvailableFunds.PendingAmt = big.Zero() + } + if channelAvailableFunds.QueuedAmt.Nil() { + channelAvailableFunds.QueuedAmt = big.Zero() + } + if channelAvailableFunds.VoucherReedeemedAmt.Nil() { + channelAvailableFunds.VoucherReedeemedAmt = big.Zero() + } + return channelAvailableFunds +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/test_retrieval_provider_node.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/test_retrieval_provider_node.go new file mode 100644 index 00000000000..c59176e0701 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/impl/testnodes/test_retrieval_provider_node.go @@ -0,0 +1,196 @@ +package testnodes + +import ( + "bytes" + "context" + "encoding/base64" + "sort" + "sync" + "testing" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + 
"github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared" +) + +var log = logging.Logger("retrieval_provnode_test") + +type expectedVoucherKey struct { + paymentChannel string + voucher string + proof string + expectedAmount string +} + +type voucherResult struct { + amount abi.TokenAmount + err error +} + +// TestRetrievalProviderNode is a node adapter for a retrieval provider whose +// responses are mocked +type TestRetrievalProviderNode struct { + ChainHeadError error + lk sync.Mutex + expectedVouchers map[expectedVoucherKey]voucherResult + + expectedPricingParamDeals []abi.DealID + receivedPricingParamDeals []abi.DealID + + expectedPricingPieceCID cid.Cid + receivedPricingPieceCID cid.Cid + + isVerified bool + receivedVouchers []abi.TokenAmount + unsealPaused chan struct{} +} + +var _ retrievalmarket.RetrievalProviderNode = &TestRetrievalProviderNode{} + +// NewTestRetrievalProviderNode instantiates a new TestRetrievalProviderNode +func NewTestRetrievalProviderNode() *TestRetrievalProviderNode { + return &TestRetrievalProviderNode{ + expectedVouchers: make(map[expectedVoucherKey]voucherResult), + } +} + +func (trpn *TestRetrievalProviderNode) MarkVerified() { + trpn.isVerified = true +} + +func (trpn *TestRetrievalProviderNode) ExpectPricingParams(pieceCID cid.Cid, deals []abi.DealID) { + trpn.expectedPricingPieceCID = pieceCID + trpn.expectedPricingParamDeals = deals +} + +func (trpn *TestRetrievalProviderNode) GetRetrievalPricingInput(_ context.Context, pieceCID cid.Cid, deals []abi.DealID) (retrievalmarket.PricingInput, error) { + trpn.receivedPricingParamDeals = deals + trpn.receivedPricingPieceCID = pieceCID + + 
return retrievalmarket.PricingInput{ + VerifiedDeal: trpn.isVerified, + }, nil +} + +// VerifyExpectations verifies that all expected calls were made and no other calls +// were made +func (trpn *TestRetrievalProviderNode) VerifyExpectations(t *testing.T) { + require.Equal(t, len(trpn.expectedVouchers), len(trpn.receivedVouchers)) + require.Equal(t, trpn.expectedPricingPieceCID, trpn.receivedPricingPieceCID) + require.Equal(t, trpn.expectedPricingParamDeals, trpn.receivedPricingParamDeals) +} + +// SavePaymentVoucher simulates saving a payment voucher with a stubbed result +func (trpn *TestRetrievalProviderNode) SavePaymentVoucher( + ctx context.Context, + paymentChannel address.Address, + voucher *paychtypes.SignedVoucher, + proof []byte, + expectedAmount abi.TokenAmount, + tok shared.TipSetToken) (abi.TokenAmount, error) { + + trpn.lk.Lock() + defer trpn.lk.Unlock() + + key, err := trpn.toExpectedVoucherKey(paymentChannel, voucher, proof, voucher.Amount) + if err != nil { + return abi.TokenAmount{}, err + } + + max := big.Zero() + for _, amt := range trpn.receivedVouchers { + max = big.Max(max, amt) + } + trpn.receivedVouchers = append(trpn.receivedVouchers, voucher.Amount) + rcvd := big.Sub(voucher.Amount, max) + if rcvd.LessThan(big.Zero()) { + rcvd = big.Zero() + } + if len(trpn.expectedVouchers) == 0 { + return rcvd, nil + } + + result, ok := trpn.expectedVouchers[key] + if ok { + return rcvd, result.err + } + var amts []abi.TokenAmount + for _, vchr := range trpn.expectedVouchers { + amts = append(amts, vchr.amount) + } + sort.Slice(amts, func(i, j int) bool { + return amts[i].LessThan(amts[j]) + }) + err = xerrors.Errorf("SavePaymentVoucher failed - voucher %d didnt match expected voucher %d in %s", voucher.Amount, expectedAmount, amts) + return abi.TokenAmount{}, err +} + +// GetMinerWorkerAddress translates an address +func (trpn *TestRetrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken) 
(address.Address, error) { + return addr, nil +} + +// GetChainHead returns a mock value for the chain head +func (trpn *TestRetrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { + return []byte{42}, 0, trpn.ChainHeadError +} + +// --- Non-interface Functions + +// to ExpectedVoucherKey creates a lookup key for expected vouchers. +func (trpn *TestRetrievalProviderNode) toExpectedVoucherKey(paymentChannel address.Address, voucher *paychtypes.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount) (expectedVoucherKey, error) { + pcString := paymentChannel.String() + buf := new(bytes.Buffer) + if err := voucher.MarshalCBOR(buf); err != nil { + return expectedVoucherKey{}, err + } + voucherString := base64.RawURLEncoding.EncodeToString(buf.Bytes()) + proofString := string(proof) + expectedAmountString := expectedAmount.String() + return expectedVoucherKey{pcString, voucherString, proofString, expectedAmountString}, nil +} + +// ExpectVoucher sets a voucher to be expected by SavePaymentVoucher +// paymentChannel: the address of the payment channel the client creates +// voucher: the voucher to match +// proof: the proof to use (can be blank) +// expectedAmount: the expected tokenamount for this voucher +// actualAmount: the actual amount to use. 
use same as expectedAmount unless you want to trigger an error +// expectedErr: an error message to expect +func (trpn *TestRetrievalProviderNode) ExpectVoucher( + paymentChannel address.Address, + voucher *paychtypes.SignedVoucher, + proof []byte, + expectedAmount abi.TokenAmount, + actualAmount abi.TokenAmount, // the actual amount it should have (same unless you want to trigger an error) + expectedErr error) error { + vch := *voucher + vch.Amount = expectedAmount + key, err := trpn.toExpectedVoucherKey(paymentChannel, &vch, proof, expectedAmount) + if err != nil { + return err + } + trpn.expectedVouchers[key] = voucherResult{actualAmount, expectedErr} + return nil +} + +func (trpn *TestRetrievalProviderNode) AddReceivedVoucher(amt abi.TokenAmount) { + trpn.receivedVouchers = append(trpn.receivedVouchers, amt) +} + +func (trpn *TestRetrievalProviderNode) MaxReceivedVoucher() abi.TokenAmount { + max := abi.NewTokenAmount(0) + for _, amt := range trpn.receivedVouchers { + max = big.Max(max, amt) + } + return max +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/maptypes/maptypes.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/maptypes/maptypes.go new file mode 100644 index 00000000000..e73f6064675 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/maptypes/maptypes.go @@ -0,0 +1,55 @@ +package maptypes + +import ( + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +//go:generate cbor-gen-for --map-encoding ClientDealState1 ProviderDealState1 + +// Version 1 of the ClientDealState +type ClientDealState1 struct { + retrievalmarket.DealProposal + StoreID *uint64 + ChannelID 
datatransfer.ChannelID + LastPaymentRequested bool + AllBlocksReceived bool + TotalFunds abi.TokenAmount + ClientWallet address.Address + MinerWallet address.Address + PaymentInfo *retrievalmarket.PaymentInfo + Status retrievalmarket.DealStatus + Sender peer.ID + TotalReceived uint64 + Message string + BytesPaidFor uint64 + CurrentInterval uint64 + PaymentRequested abi.TokenAmount + FundsSpent abi.TokenAmount + UnsealFundsPaid abi.TokenAmount + WaitMsgCID *cid.Cid + VoucherShortfall abi.TokenAmount + LegacyProtocol bool +} + +// Version 1 of the ProviderDealState +type ProviderDealState1 struct { + retrievalmarket.DealProposal + StoreID uint64 + ChannelID datatransfer.ChannelID + PieceInfo *piecestore.PieceInfo + Status retrievalmarket.DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 + LegacyProtocol bool +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/maptypes/maptypes_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/maptypes/maptypes_cbor_gen.go new file mode 100644 index 00000000000..b53d1febbd7 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/maptypes/maptypes_cbor_gen.go @@ -0,0 +1,1141 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package maptypes + +import ( + "fmt" + "io" + "math" + "sort" + + piecestore "github.com/filecoin-project/go-fil-markets/piecestore" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *ClientDealState1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{181}); err != nil { + return err + } + + // t.DealProposal (retrievalmarket.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := 
io.WriteString(w, string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.LastPaymentRequested (bool) (bool) + if len("LastPaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LastPaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LastPaymentRequested"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LastPaymentRequested")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LastPaymentRequested); err != nil { + return err + } + + // t.AllBlocksReceived (bool) (bool) + if len("AllBlocksReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AllBlocksReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AllBlocksReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AllBlocksReceived")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AllBlocksReceived); err != nil { + return err + } + + // t.TotalFunds (big.Int) (struct) + if len("TotalFunds") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalFunds\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalFunds"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalFunds")); err != nil { + return err + } + + if err := t.TotalFunds.MarshalCBOR(cw); err != nil { + return err + } + + // t.ClientWallet (address.Address) (struct) + if len("ClientWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientWallet"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientWallet")); err != nil { + return err + } + + if err := t.ClientWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinerWallet 
(address.Address) (struct) + if len("MinerWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinerWallet"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinerWallet")); err != nil { + return err + } + + if err := t.MinerWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) + if len("PaymentInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInfo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentInfo")); err != nil { + return err + } + + if err := t.PaymentInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len("Sender") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sender\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sender"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Sender")); err != nil { + return err + } + + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Sender)); err != nil { + return err + } + + // t.TotalReceived 
(uint64) (uint64) + if len("TotalReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalReceived")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalReceived)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.BytesPaidFor (uint64) (uint64) + if len("BytesPaidFor") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BytesPaidFor\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BytesPaidFor"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BytesPaidFor")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BytesPaidFor)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CurrentInterval")); err != nil { + return err + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.PaymentRequested (big.Int) (struct) + if len("PaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentRequested"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentRequested")); err != nil { + return err + } + + if err := t.PaymentRequested.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsSpent (big.Int) (struct) + if len("FundsSpent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsSpent\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsSpent"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsSpent")); err != nil { + return err + } + + if err := t.FundsSpent.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealFundsPaid (big.Int) (struct) + if len("UnsealFundsPaid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealFundsPaid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealFundsPaid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealFundsPaid")); err != nil { + return err + } + + if err := t.UnsealFundsPaid.MarshalCBOR(cw); err != nil { + return err + } + + // t.WaitMsgCID (cid.Cid) (struct) + if len("WaitMsgCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WaitMsgCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitMsgCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WaitMsgCID")); err != nil { + return err + } + + if t.WaitMsgCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.WaitMsgCID); err != nil { + return 
xerrors.Errorf("failed to write cid field t.WaitMsgCID: %w", err) + } + } + + // t.VoucherShortfall (big.Int) (struct) + if len("VoucherShortfall") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VoucherShortfall\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VoucherShortfall"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VoucherShortfall")); err != nil { + return err + } + + if err := t.VoucherShortfall.MarshalCBOR(cw); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + return nil +} + +func (t *ClientDealState1) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDealState1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDealState1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (retrievalmarket.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + b, err := cr.ReadByte() + if err != nil { + 
return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.LastPaymentRequested (bool) (bool) + case "LastPaymentRequested": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LastPaymentRequested = false + case 21: + t.LastPaymentRequested = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.AllBlocksReceived (bool) (bool) + case "AllBlocksReceived": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AllBlocksReceived = false + case 21: + t.AllBlocksReceived = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.TotalFunds (big.Int) (struct) + case "TotalFunds": + + { + + if err := t.TotalFunds.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) + } + + } + // t.ClientWallet (address.Address) (struct) + case "ClientWallet": + + { + + if err := t.ClientWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) + } + + } + // t.MinerWallet (address.Address) (struct) + case "MinerWallet": + + { + + if err := t.MinerWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWallet: %w", err) + } + + } + 
// t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) + case "PaymentInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentInfo = new(retrievalmarket.PaymentInfo) + if err := t.PaymentInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = retrievalmarket.DealStatus(extra) + + } + // t.Sender (peer.ID) (string) + case "Sender": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.TotalReceived (uint64) (uint64) + case "TotalReceived": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalReceived = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.BytesPaidFor (uint64) (uint64) + case "BytesPaidFor": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BytesPaidFor = uint64(extra) + + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.PaymentRequested (big.Int) (struct) + case "PaymentRequested": + + { + + if err := t.PaymentRequested.UnmarshalCBOR(cr); err != nil { + return 
xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) + } + + } + // t.FundsSpent (big.Int) (struct) + case "FundsSpent": + + { + + if err := t.FundsSpent.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) + } + + } + // t.UnsealFundsPaid (big.Int) (struct) + case "UnsealFundsPaid": + + { + + if err := t.UnsealFundsPaid.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealFundsPaid: %w", err) + } + + } + // t.WaitMsgCID (cid.Cid) (struct) + case "WaitMsgCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) + } + + t.WaitMsgCID = &c + } + + } + // t.VoucherShortfall (big.Int) (struct) + case "VoucherShortfall": + + { + + if err := t.VoucherShortfall.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherShortfall: %w", err) + } + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ProviderDealState1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{171}); err != nil { + return err + } + + // t.DealProposal (retrievalmarket.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field 
\"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceInfo (piecestore.PieceInfo) (struct) + if len("PieceInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceInfo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceInfo")); err != nil { + return err + } + + if err := t.PieceInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if 
err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len("Receiver") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Receiver\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Receiver"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Receiver")); err != nil { + return err + } + + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Receiver)); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + if len("TotalSent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalSent\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalSent"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalSent")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if len("FundsReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReceived")); err != nil { + return err + } + + if err := t.FundsReceived.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + 
+ if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CurrentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState1) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (retrievalmarket.DealProposal) (struct) + 
case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = uint64(extra) + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.PieceInfo (piecestore.PieceInfo) (struct) + case "PieceInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceInfo = new(piecestore.PieceInfo) + if err := t.PieceInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = retrievalmarket.DealStatus(extra) + + } + // t.Receiver (peer.ID) (string) + case "Receiver": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.TotalSent (uint64) (uint64) + case "TotalSent": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.FundsReceived (big.Int) (struct) + case "FundsReceived": + + { + + if err := t.FundsReceived.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.Message (string) (string) + case "Message": + + 
{ + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations.go new file mode 100644 index 00000000000..0c8aa7ef08a --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations.go @@ -0,0 +1,386 @@ +package migrations + +import ( + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" + "github.com/filecoin-project/go-state-types/abi" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/go-fil-markets/piecestore" + piecemigrations "github.com/filecoin-project/go-fil-markets/piecestore/migrations" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations/maptypes" +) + +//go:generate cbor-gen-for Query0 QueryResponse0 DealProposal0 DealResponse0 Params0 QueryParams0 DealPayment0 ClientDealState0 ProviderDealState0 PaymentInfo0 RetrievalPeer0 Ask0 + +// PaymentInfo0 is version 0 of PaymentInfo +type PaymentInfo0 struct { + PayCh address.Address + Lane uint64 +} + +// ClientDealState0 is version 0 of ClientDealState +type ClientDealState0 struct { + DealProposal0 + StoreID *uint64 + ChannelID datatransfer.ChannelID + LastPaymentRequested bool + AllBlocksReceived bool + TotalFunds abi.TokenAmount + ClientWallet address.Address + MinerWallet address.Address + PaymentInfo *PaymentInfo0 + Status retrievalmarket.DealStatus + Sender peer.ID + TotalReceived uint64 + Message string + BytesPaidFor uint64 + CurrentInterval uint64 + PaymentRequested abi.TokenAmount + FundsSpent abi.TokenAmount + UnsealFundsPaid abi.TokenAmount + WaitMsgCID *cid.Cid // the CID of any message the client deal is waiting for + VoucherShortfall abi.TokenAmount +} + +// ProviderDealState0 is version 0 of ProviderDealState +type ProviderDealState0 struct { + DealProposal0 + StoreID uint64 + ChannelID datatransfer.ChannelID + PieceInfo *piecemigrations.PieceInfo0 + Status retrievalmarket.DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 +} + +// RetrievalPeer0 is version 0 of RetrievalPeer +type RetrievalPeer0 struct { + Address address.Address + ID peer.ID // optional + PieceCID *cid.Cid +} + +// QueryParams0 is version 0 of QueryParams +type QueryParams0 struct { + PieceCID *cid.Cid // optional, query if miner has this cid in this piece. some miners may not be able to respond. + //Selector ipld.Node // optional, query if miner has this cid in this piece. some miners may not be able to respond. 
+ //MaxPricePerByte abi.TokenAmount // optional, tell miner uninterested if more expensive than this + //MinPaymentInterval uint64 // optional, tell miner uninterested unless payment interval is greater than this + //MinPaymentIntervalIncrease uint64 // optional, tell miner uninterested unless payment interval increase is greater than this +} + +// Query0 is version 0 of Query +type Query0 struct { + PayloadCID cid.Cid // V0 + QueryParams0 // V1 +} + +// QueryResponse0 is version 0 of QueryResponse +type QueryResponse0 struct { + Status retrievalmarket.QueryResponseStatus + PieceCIDFound retrievalmarket.QueryItemStatus // V1 - if a PieceCID was requested, the result + //SelectorFound QueryItemStatus // V1 - if a Selector was requested, the result + + Size uint64 // Total size of piece in bytes + //ExpectedPayloadSize uint64 // V1 - optional, if PayloadCID + selector are specified and miner knows, can offer an expected size + + PaymentAddress address.Address // address to send funds to -- may be different than miner addr + MinPricePerByte abi.TokenAmount + MaxPaymentInterval uint64 + MaxPaymentIntervalIncrease uint64 + Message string + UnsealPrice abi.TokenAmount +} + +// Params0 is version 0 of Params +type Params0 struct { + Selector *cbg.Deferred // V1 + PieceCID *cid.Cid + PricePerByte abi.TokenAmount + PaymentInterval uint64 // when to request payment + PaymentIntervalIncrease uint64 + UnsealPrice abi.TokenAmount +} + +// DealProposal0 is version 0 of DealProposal +type DealProposal0 struct { + PayloadCID cid.Cid + ID retrievalmarket.DealID + Params0 +} + +// Type method makes DealProposal0 usable as a voucher +func (dp *DealProposal0) Type() datatransfer.TypeIdentifier { + return "RetrievalDealProposal" +} + +// DealResponse0 is version 0 of DealResponse +type DealResponse0 struct { + Status retrievalmarket.DealStatus + ID retrievalmarket.DealID + + // payment required to proceed + PaymentOwed abi.TokenAmount + + Message string +} + +// Type method makes 
DealResponse0 usable as a voucher result +func (dr *DealResponse0) Type() datatransfer.TypeIdentifier { + return "RetrievalDealResponse" +} + +// DealPayment0 is version 0 of DealPayment +type DealPayment0 struct { + ID retrievalmarket.DealID + PaymentChannel address.Address + PaymentVoucher *paychtypes.SignedVoucher +} + +// Type method makes DealPayment0 usable as a voucher +func (dr *DealPayment0) Type() datatransfer.TypeIdentifier { + return "RetrievalDealPayment" +} + +// Ask0 is version 0 of Ask +type Ask0 struct { + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + PaymentInterval uint64 + PaymentIntervalIncrease uint64 +} + +// MigrateQueryParams0To1 migrates tuple encoded query params to map encoded query params +func MigrateQueryParams0To1(oldParams QueryParams0) retrievalmarket.QueryParams { + return retrievalmarket.QueryParams{ + PieceCID: oldParams.PieceCID, + } +} + +// MigrateQuery0To1 migrates tuple encoded query to map encoded query +func MigrateQuery0To1(oldQuery Query0) retrievalmarket.Query { + return retrievalmarket.Query{ + PayloadCID: oldQuery.PayloadCID, + QueryParams: MigrateQueryParams0To1(oldQuery.QueryParams0), + } +} + +// MigrateQueryResponse0To1 migrates tuple encoded query response to map encoded query response +func MigrateQueryResponse0To1(oldQr QueryResponse0) retrievalmarket.QueryResponse { + return retrievalmarket.QueryResponse{ + Status: oldQr.Status, + PieceCIDFound: oldQr.PieceCIDFound, + Size: oldQr.Size, + PaymentAddress: oldQr.PaymentAddress, + MinPricePerByte: oldQr.MinPricePerByte, + MaxPaymentInterval: oldQr.MaxPaymentInterval, + MaxPaymentIntervalIncrease: oldQr.MaxPaymentIntervalIncrease, + Message: oldQr.Message, + UnsealPrice: oldQr.UnsealPrice, + } +} + +// MigrateParams0To1 migrates tuple encoded deal params to map encoded deal params +func MigrateParams0To1(oldParams Params0) retrievalmarket.Params { + return retrievalmarket.Params{ + Selector: oldParams.Selector, + PieceCID: oldParams.PieceCID, + 
PricePerByte: oldParams.PricePerByte, + PaymentInterval: oldParams.PaymentInterval, + PaymentIntervalIncrease: oldParams.PaymentIntervalIncrease, + UnsealPrice: oldParams.UnsealPrice, + } +} + +// MigrateDealPayment0To1 migrates a tuple encoded DealPayment to a map +// encoded deal payment +func MigrateDealPayment0To1(oldDp DealPayment0) retrievalmarket.DealPayment { + return retrievalmarket.DealPayment{ + ID: oldDp.ID, + PaymentChannel: oldDp.PaymentChannel, + PaymentVoucher: oldDp.PaymentVoucher, + } +} + +// MigrateDealProposal0To1 migrates a tuple encoded DealProposal to a map +// encoded deal proposal +func MigrateDealProposal0To1(oldDp DealProposal0) retrievalmarket.DealProposal { + return retrievalmarket.DealProposal{ + PayloadCID: oldDp.PayloadCID, + ID: oldDp.ID, + Params: MigrateParams0To1(oldDp.Params0), + } +} + +// MigrateDealResponse0To1 migrates a tuple encoded DealResponse to a map +// encoded deal response +func MigrateDealResponse0To1(oldDr DealResponse0) retrievalmarket.DealResponse { + return retrievalmarket.DealResponse{ + Status: oldDr.Status, + ID: oldDr.ID, + PaymentOwed: oldDr.PaymentOwed, + Message: oldDr.Message, + } +} + +// MigratePaymentInfo0To1 migrates an optional payment info tuple encoded struct +// to a map encoded struct +func MigratePaymentInfo0To1(oldPi *PaymentInfo0) *retrievalmarket.PaymentInfo { + if oldPi == nil { + return nil + } + return &retrievalmarket.PaymentInfo{ + PayCh: oldPi.PayCh, + Lane: oldPi.Lane, + } +} + +// MigrateClientDealState0To1 migrates a tuple encoded deal state to a map encoded deal state +func MigrateClientDealState0To1(oldDs *ClientDealState0) (*maptypes.ClientDealState1, error) { + return &maptypes.ClientDealState1{ + DealProposal: MigrateDealProposal0To1(oldDs.DealProposal0), + StoreID: oldDs.StoreID, + ChannelID: oldDs.ChannelID, + LastPaymentRequested: oldDs.LastPaymentRequested, + AllBlocksReceived: oldDs.AllBlocksReceived, + TotalFunds: oldDs.TotalFunds, + ClientWallet: oldDs.ClientWallet, + 
MinerWallet: oldDs.MinerWallet, + PaymentInfo: MigratePaymentInfo0To1(oldDs.PaymentInfo), + Status: oldDs.Status, + Sender: oldDs.Sender, + TotalReceived: oldDs.TotalReceived, + Message: oldDs.Message, + BytesPaidFor: oldDs.BytesPaidFor, + CurrentInterval: oldDs.CurrentInterval, + PaymentRequested: oldDs.PaymentRequested, + FundsSpent: oldDs.FundsSpent, + UnsealFundsPaid: oldDs.UnsealFundsPaid, + WaitMsgCID: oldDs.WaitMsgCID, + VoucherShortfall: oldDs.VoucherShortfall, + LegacyProtocol: true, + }, nil +} + +// MigrateClientDealState1To2 migrates from v1 to v2 of a ClientDealState. +// The difference is that in v2 the ChannelID is a pointer, because the +// ChannelID is not set until the data transfer has started, so it should +// initially be nil. +func MigrateClientDealState1To2(oldDs *maptypes.ClientDealState1) (*retrievalmarket.ClientDealState, error) { + var chid *datatransfer.ChannelID + if oldDs.ChannelID.Initiator != "" && oldDs.ChannelID.Responder != "" { + chid = &oldDs.ChannelID + } + return &retrievalmarket.ClientDealState{ + DealProposal: oldDs.DealProposal, + StoreID: oldDs.StoreID, + ChannelID: chid, + LastPaymentRequested: oldDs.LastPaymentRequested, + AllBlocksReceived: oldDs.AllBlocksReceived, + TotalFunds: oldDs.TotalFunds, + ClientWallet: oldDs.ClientWallet, + MinerWallet: oldDs.MinerWallet, + PaymentInfo: oldDs.PaymentInfo, + Status: oldDs.Status, + Sender: oldDs.Sender, + TotalReceived: oldDs.TotalReceived, + Message: oldDs.Message, + BytesPaidFor: oldDs.BytesPaidFor, + CurrentInterval: oldDs.CurrentInterval, + PaymentRequested: oldDs.PaymentRequested, + FundsSpent: oldDs.FundsSpent, + UnsealFundsPaid: oldDs.UnsealFundsPaid, + WaitMsgCID: oldDs.WaitMsgCID, + VoucherShortfall: oldDs.VoucherShortfall, + LegacyProtocol: true, + }, nil +} + +// MigrateProviderDealState0To1 migrates a tuple encoded deal state to a map encoded deal state +func MigrateProviderDealState0To1(oldDs *ProviderDealState0) (*maptypes.ProviderDealState1, error) { + var 
pieceInfo *piecestore.PieceInfo
+	var err error
+	if oldDs.PieceInfo != nil {
+		pieceInfo, err = piecemigrations.MigratePieceInfo0To1(oldDs.PieceInfo)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &maptypes.ProviderDealState1{
+		DealProposal:    MigrateDealProposal0To1(oldDs.DealProposal0),
+		StoreID:         oldDs.StoreID,
+		ChannelID:       oldDs.ChannelID,
+		PieceInfo:       pieceInfo,
+		Status:          oldDs.Status,
+		Receiver:        oldDs.Receiver,
+		TotalSent:       oldDs.TotalSent,
+		FundsReceived:   oldDs.FundsReceived,
+		Message:         oldDs.Message,
+		CurrentInterval: oldDs.CurrentInterval,
+		LegacyProtocol:  true,
+	}, nil
+}
+
+// MigrateProviderDealState1To2 migrates from v1 to v2 of a
+// ProviderDealState.
+// The difference is that in v2 the ChannelID is a pointer, because the
+// ChannelID is not set until the data transfer has started, so it should
+// initially be nil.
+func MigrateProviderDealState1To2(oldDs *maptypes.ProviderDealState1) (*retrievalmarket.ProviderDealState, error) {
+	var chid *datatransfer.ChannelID
+	if oldDs.ChannelID.Initiator != "" && oldDs.ChannelID.Responder != "" {
+		chid = &oldDs.ChannelID
+	}
+	return &retrievalmarket.ProviderDealState{
+		DealProposal:    oldDs.DealProposal,
+		StoreID:         oldDs.StoreID,
+		ChannelID:       chid,
+		PieceInfo:       oldDs.PieceInfo,
+		Status:          oldDs.Status,
+		Receiver:        oldDs.Receiver,
+		TotalSent:       oldDs.TotalSent,
+		FundsReceived:   oldDs.FundsReceived,
+		Message:         oldDs.Message,
+		CurrentInterval: oldDs.CurrentInterval,
+		LegacyProtocol:  oldDs.LegacyProtocol,
+	}, nil
+}
+
+// MigrateAsk0To1 migrates a tuple encoded ask to a map encoded ask
+func MigrateAsk0To1(oldAsk *Ask0) (*retrievalmarket.Ask, error) {
+	return &retrievalmarket.Ask{
+		PricePerByte:            oldAsk.PricePerByte,
+		UnsealPrice:             oldAsk.UnsealPrice,
+		PaymentInterval:         oldAsk.PaymentInterval,
+		PaymentIntervalIncrease: oldAsk.PaymentIntervalIncrease,
+	}, nil
+}
+
+// ClientMigrations are migrations for the client's store of retrieval deals
+var ClientMigrations = 
versioned.BuilderList{
+	versioned.NewVersionedBuilder(MigrateClientDealState0To1, "1"),
+	versioned.NewVersionedBuilder(MigrateClientDealState1To2, "2").OldVersion("1"),
+}
+
+// ProviderMigrations are migrations for the provider's store of retrieval deals
+var ProviderMigrations = versioned.BuilderList{
+	versioned.NewVersionedBuilder(MigrateProviderDealState0To1, "1").
+		FilterKeys([]string{"/retrieval-ask", "/retrieval-ask/latest", "/retrieval-ask/1/latest", "/retrieval-ask/versions/current"}),
+	versioned.NewVersionedBuilder(MigrateProviderDealState1To2, "2").OldVersion("1"),
+}
+
+// AskMigrations are migrations for the provider's retrieval ask
+var AskMigrations = versioned.BuilderList{
+	versioned.NewVersionedBuilder(MigrateAsk0To1, versioning.VersionKey("1")),
+}
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations_cbor_gen.go
new file mode 100644
index 00000000000..43817f564e6
--- /dev/null
+++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations_cbor_gen.go
@@ -0,0 +1,1815 @@
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+ +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + migrations "github.com/filecoin-project/go-fil-markets/piecestore/migrations" + retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufQuery0 = []byte{130} + +func (t *Query0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufQuery0); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.QueryParams0 (migrations.QueryParams0) (struct) + if err := t.QueryParams0.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Query0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Query0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PayloadCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.QueryParams0 (migrations.QueryParams0) (struct) + + { + + if err := t.QueryParams0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.QueryParams0: %w", err) + } + + } + return nil +} + +var lengthBufQueryResponse0 = 
[]byte{137} + +func (t *QueryResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufQueryResponse0); err != nil { + return err + } + + // t.Status (retrievalmarket.QueryResponseStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceCIDFound)); err != nil { + return err + } + + // t.Size (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + // t.PaymentAddress (address.Address) (struct) + if err := t.PaymentAddress.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinPricePerByte (big.Int) (struct) + if err := t.MinPricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.MaxPaymentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentInterval)); err != nil { + return err + } + + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentIntervalIncrease)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *QueryResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + 
defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 9 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Status (retrievalmarket.QueryResponseStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = retrievalmarket.QueryResponseStatus(extra) + + } + // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceCIDFound = retrievalmarket.QueryItemStatus(extra) + + } + // t.Size (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = uint64(extra) + + } + // t.PaymentAddress (address.Address) (struct) + + { + + if err := t.PaymentAddress.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentAddress: %w", err) + } + + } + // t.MinPricePerByte (big.Int) (struct) + + { + + if err := t.MinPricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinPricePerByte: %w", err) + } + + } + // t.MaxPaymentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentInterval = uint64(extra) + + } + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentIntervalIncrease = uint64(extra) + + } + // t.Message 
(string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.UnsealPrice (big.Int) (struct) + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + return nil +} + +var lengthBufDealProposal0 = []byte{131} + +func (t *DealProposal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealProposal0); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.ID (retrievalmarket.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.Params0 (migrations.Params0) (struct) + if err := t.Params0.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealProposal0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealProposal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PayloadCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.ID (retrievalmarket.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = retrievalmarket.DealID(extra) + + } + // t.Params0 (migrations.Params0) 
(struct) + + { + + if err := t.Params0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Params0: %w", err) + } + + } + return nil +} + +var lengthBufDealResponse0 = []byte{132} + +func (t *DealResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealResponse0); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.ID (retrievalmarket.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentOwed (big.Int) (struct) + if err := t.PaymentOwed.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + return nil +} + +func (t *DealResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = retrievalmarket.DealStatus(extra) + + } + // t.ID (retrievalmarket.DealID) (uint64) + + { + + maj, 
extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = retrievalmarket.DealID(extra) + + } + // t.PaymentOwed (big.Int) (struct) + + { + + if err := t.PaymentOwed.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentOwed: %w", err) + } + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + return nil +} + +var lengthBufParams0 = []byte{134} + +func (t *Params0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufParams0); err != nil { + return err + } + + // t.Selector (typegen.Deferred) (struct) + if err := t.Selector.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + // t.PricePerByte (big.Int) (struct) + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Params0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Params0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err 
= io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 6 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Selector (typegen.Deferred) (struct) + + { + + t.Selector = new(cbg.Deferred) + + if err := t.Selector.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.PieceCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + // t.PricePerByte (big.Int) (struct) + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + // t.UnsealPrice (big.Int) (struct) + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + return nil +} + +var lengthBufQueryParams0 = []byte{129} + +func (t *QueryParams0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufQueryParams0); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if t.PieceCID == 
nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *QueryParams0) UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryParams0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PieceCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + return nil +} + +var lengthBufDealPayment0 = []byte{131} + +func (t *DealPayment0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealPayment0); err != nil { + return err + } + + // t.ID (retrievalmarket.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentChannel (address.Address) (struct) + if err := t.PaymentChannel.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentVoucher (paych.SignedVoucher) (struct) + if err := t.PaymentVoucher.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealPayment0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealPayment0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = 
io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ID (retrievalmarket.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = retrievalmarket.DealID(extra) + + } + // t.PaymentChannel (address.Address) (struct) + + { + + if err := t.PaymentChannel.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentChannel: %w", err) + } + + } + // t.PaymentVoucher (paych.SignedVoucher) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentVoucher = new(paych.SignedVoucher) + if err := t.PaymentVoucher.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentVoucher pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufClientDealState0 = []byte{148} + +func (t *ClientDealState0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufClientDealState0); err != nil { + return err + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + if err := t.DealProposal0.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.LastPaymentRequested (bool) (bool) + if err := cbg.WriteBool(w, t.LastPaymentRequested); err != nil { + return 
err + } + + // t.AllBlocksReceived (bool) (bool) + if err := cbg.WriteBool(w, t.AllBlocksReceived); err != nil { + return err + } + + // t.TotalFunds (big.Int) (struct) + if err := t.TotalFunds.MarshalCBOR(cw); err != nil { + return err + } + + // t.ClientWallet (address.Address) (struct) + if err := t.ClientWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinerWallet (address.Address) (struct) + if err := t.MinerWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInfo (migrations.PaymentInfo0) (struct) + if err := t.PaymentInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Sender)); err != nil { + return err + } + + // t.TotalReceived (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalReceived)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.BytesPaidFor (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BytesPaidFor)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.PaymentRequested (big.Int) (struct) + if err := 
t.PaymentRequested.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsSpent (big.Int) (struct) + if err := t.FundsSpent.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealFundsPaid (big.Int) (struct) + if err := t.UnsealFundsPaid.MarshalCBOR(cw); err != nil { + return err + } + + // t.WaitMsgCID (cid.Cid) (struct) + + if t.WaitMsgCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.WaitMsgCID); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitMsgCID: %w", err) + } + } + + // t.VoucherShortfall (big.Int) (struct) + if err := t.VoucherShortfall.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ClientDealState0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDealState0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 20 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + + { + + if err := t.DealProposal0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal0: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // 
t.LastPaymentRequested (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LastPaymentRequested = false + case 21: + t.LastPaymentRequested = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.AllBlocksReceived (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AllBlocksReceived = false + case 21: + t.AllBlocksReceived = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.TotalFunds (big.Int) (struct) + + { + + if err := t.TotalFunds.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) + } + + } + // t.ClientWallet (address.Address) (struct) + + { + + if err := t.ClientWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) + } + + } + // t.MinerWallet (address.Address) (struct) + + { + + if err := t.MinerWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWallet: %w", err) + } + + } + // t.PaymentInfo (migrations.PaymentInfo0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentInfo = new(PaymentInfo0) + if err := t.PaymentInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = retrievalmarket.DealStatus(extra) 
+ + } + // t.Sender (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.TotalReceived (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalReceived = uint64(extra) + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.BytesPaidFor (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BytesPaidFor = uint64(extra) + + } + // t.CurrentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.PaymentRequested (big.Int) (struct) + + { + + if err := t.PaymentRequested.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) + } + + } + // t.FundsSpent (big.Int) (struct) + + { + + if err := t.FundsSpent.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) + } + + } + // t.UnsealFundsPaid (big.Int) (struct) + + { + + if err := t.UnsealFundsPaid.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealFundsPaid: %w", err) + } + + } + // t.WaitMsgCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) + } + + t.WaitMsgCID = &c + } + + } + // t.VoucherShortfall (big.Int) (struct) + + { + + if err := 
t.VoucherShortfall.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherShortfall: %w", err) + } + + } + return nil +} + +var lengthBufProviderDealState0 = []byte{138} + +func (t *ProviderDealState0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufProviderDealState0); err != nil { + return err + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + if err := t.DealProposal0.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceInfo (migrations.PieceInfo0) (struct) + if err := t.PieceInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Receiver)); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if err := t.FundsReceived.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, 
err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + return nil +} + +func (t *ProviderDealState0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 10 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealProposal0 (migrations.DealProposal0) (struct) + + { + + if err := t.DealProposal0.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal0: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = uint64(extra) + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + + { + + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.PieceInfo (migrations.PieceInfo0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceInfo = new(migrations.PieceInfo0) + if err := t.PieceInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = retrievalmarket.DealStatus(extra) + + } + // 
t.Receiver (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.TotalSent (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.FundsReceived (big.Int) (struct) + + { + + if err := t.FundsReceived.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.CurrentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + return nil +} + +var lengthBufPaymentInfo0 = []byte{130} + +func (t *PaymentInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPaymentInfo0); err != nil { + return err + } + + // t.PayCh (address.Address) (struct) + if err := t.PayCh.MarshalCBOR(cw); err != nil { + return err + } + + // t.Lane (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Lane)); err != nil { + return err + } + + return nil +} + +func (t *PaymentInfo0) UnmarshalCBOR(r io.Reader) (err error) { + *t = PaymentInfo0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PayCh (address.Address) (struct) 
+ + { + + if err := t.PayCh.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PayCh: %w", err) + } + + } + // t.Lane (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Lane = uint64(extra) + + } + return nil +} + +var lengthBufRetrievalPeer0 = []byte{131} + +func (t *RetrievalPeer0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufRetrievalPeer0); err != nil { + return err + } + + // t.Address (address.Address) (struct) + if err := t.Address.MarshalCBOR(cw); err != nil { + return err + } + + // t.ID (peer.ID) (string) + if len(t.ID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ID)); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *RetrievalPeer0) UnmarshalCBOR(r io.Reader) (err error) { + *t = RetrievalPeer0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Address (address.Address) (struct) + + { + + if err := t.Address.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + 
} + + } + // t.ID (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ID = peer.ID(sval) + } + // t.PieceCID (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + return nil +} + +var lengthBufAsk0 = []byte{132} + +func (t *Ask0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufAsk0); err != nil { + return err + } + + // t.PricePerByte (big.Int) (struct) + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + return nil +} + +func (t *Ask0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Ask0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PricePerByte (big.Int) (struct) + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.UnsealPrice (big.Int) (struct) + + { + + 
if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations_test.go new file mode 100644 index 00000000000..96a9e46f282 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/migrations/migrations_test.go @@ -0,0 +1,299 @@ +package migrations + +import ( + "context" + "testing" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + dss "github.com/ipfs/go-datastore/sync" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + versionedfsm "github.com/filecoin-project/go-ds-versioning/pkg/fsm" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/piecestore/migrations" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates" +) + +func NewActorAddr(t testing.TB, data string) address.Address { + ret, err := 
address.NewActorAddress([]byte(data)) + require.NoError(t, err) + return ret +} + +func TestClientStateMigration(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create a v0 client deal state + dealID := retrievalmarket.DealID(1) + storeID := uint64(1) + dummyCid, err := cid.Parse("bafkqaaa") + require.NoError(t, err) + dealState := ClientDealState0{ + DealProposal0: DealProposal0{ + PayloadCID: dummyCid, + ID: dealID, + Params0: Params0{ + PieceCID: &dummyCid, + PricePerByte: abi.NewTokenAmount(0), + UnsealPrice: abi.NewTokenAmount(0), + }, + }, + TotalFunds: abi.NewTokenAmount(0), + ClientWallet: NewActorAddr(t, "client"), + MinerWallet: NewActorAddr(t, "miner"), + TotalReceived: 0, + CurrentInterval: 10, + BytesPaidFor: 0, + PaymentRequested: abi.NewTokenAmount(0), + FundsSpent: abi.NewTokenAmount(0), + Status: retrievalmarket.DealStatusNew, + Sender: peer.ID("sender"), + UnsealFundsPaid: big.Zero(), + StoreID: &storeID, + } + dealStateWithChannelID := dealState + chid := datatransfer.ChannelID{ + Initiator: "initiator", + Responder: "responder", + ID: 1, + } + dealStateWithChannelID.ChannelID = chid + + testCases := []struct { + name string + dealState0 *ClientDealState0 + expChannelID *datatransfer.ChannelID + }{{ + name: "from v0 - v2 with channel ID", + dealState0: &dealState, + expChannelID: nil, + }, { + name: "from v0 - v2 with no channel ID", + dealState0: &dealStateWithChannelID, + expChannelID: &chid, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + + // Store the v0 client deal state to the datastore + stateMachines0, err := fsm.New(ds, fsm.Parameters{ + Environment: &mockClientEnv{}, + StateType: ClientDealState0{}, + StateKeyField: "Status", + Events: fsm.Events{}, + StateEntryFuncs: fsm.StateEntryFuncs{}, + FinalityStates: []fsm.StateKey{}, + }) + require.NoError(t, err) + + err = stateMachines0.Begin(dealID, 
tc.dealState0) + require.NoError(t, err) + + // Prepare to run migration to v2 datastore + retrievalMigrations, err := ClientMigrations.Build() + require.NoError(t, err) + + stateMachines, migrateStateMachines, err := versionedfsm.NewVersionedFSM(ds, fsm.Parameters{ + Environment: &mockClientEnv{}, + StateType: retrievalmarket.ClientDealState{}, + StateKeyField: "Status", + Events: clientstates.ClientEvents, + StateEntryFuncs: clientstates.ClientStateEntryFuncs, + FinalityStates: clientstates.ClientFinalityStates, + }, retrievalMigrations, "2") + require.NoError(t, err) + + // Run migration to v2 datastore + err = migrateStateMachines(ctx) + require.NoError(t, err) + + var states []retrievalmarket.ClientDealState + err = stateMachines.List(&states) + require.NoError(t, err) + + require.Len(t, states, 1) + if tc.expChannelID == nil { + // Ensure that the channel ID is nil if it was not explicitly defined + require.Nil(t, states[0].ChannelID) + } else { + // Ensure that the channel ID is correct if it was defined + require.Equal(t, chid, *states[0].ChannelID) + } + }) + } +} + +func TestProviderStateMigration(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create a v0 provider deal state + dealID := retrievalmarket.DealID(1) + storeID := uint64(1) + dummyCid, err := cid.Parse("bafkqaaa") + require.NoError(t, err) + dealState := ProviderDealState0{ + DealProposal0: DealProposal0{ + PayloadCID: dummyCid, + ID: dealID, + Params0: Params0{ + PieceCID: &dummyCid, + PricePerByte: abi.NewTokenAmount(0), + UnsealPrice: abi.NewTokenAmount(0), + }, + }, + StoreID: storeID, + PieceInfo: &migrations.PieceInfo0{ + PieceCID: dummyCid, + Deals: nil, + }, + Status: retrievalmarket.DealStatusNew, + Receiver: peer.ID("receiver"), + TotalSent: 0, + FundsReceived: abi.NewTokenAmount(0), + Message: "hello", + CurrentInterval: 10, + } + dealStateWithChannelID := dealState + chid := datatransfer.ChannelID{ + Initiator: "initiator", + 
Responder: "responder", + ID: 1, + } + dealStateWithChannelID.ChannelID = chid + + testCases := []struct { + name string + dealState0 *ProviderDealState0 + expChannelID *datatransfer.ChannelID + }{{ + name: "from v0 - v2 with channel ID", + dealState0: &dealState, + expChannelID: nil, + }, { + name: "from v0 - v2 with no channel ID", + dealState0: &dealStateWithChannelID, + expChannelID: &chid, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + + // Store the v0 provider deal state to the datastore + stateMachines0, err := fsm.New(ds, fsm.Parameters{ + Environment: &mockProviderEnv{}, + StateType: ProviderDealState0{}, + StateKeyField: "Status", + Events: fsm.Events{}, + StateEntryFuncs: fsm.StateEntryFuncs{}, + FinalityStates: []fsm.StateKey{}, + }) + require.NoError(t, err) + + err = stateMachines0.Begin(dealID, tc.dealState0) + require.NoError(t, err) + + // Prepare to run migration to v2 datastore + retrievalMigrations, err := ProviderMigrations.Build() + require.NoError(t, err) + + stateMachines, migrateStateMachines, err := versionedfsm.NewVersionedFSM(ds, fsm.Parameters{ + Environment: &mockProviderEnv{}, + StateType: retrievalmarket.ProviderDealState{}, + StateKeyField: "Status", + Events: providerstates.ProviderEvents, + StateEntryFuncs: providerstates.ProviderStateEntryFuncs, + FinalityStates: providerstates.ProviderFinalityStates, + }, retrievalMigrations, "2") + require.NoError(t, err) + + // Run migration to v2 datastore + err = migrateStateMachines(ctx) + require.NoError(t, err) + + var states []retrievalmarket.ProviderDealState + err = stateMachines.List(&states) + require.NoError(t, err) + + require.Len(t, states, 1) + if tc.expChannelID == nil { + // Ensure that the channel ID is nil if it was not explicitly defined + require.Nil(t, states[0].ChannelID) + } else { + // Ensure that the channel ID is correct if it was defined + require.Equal(t, chid, *states[0].ChannelID) 
+ } + }) + } +} + +type mockClientEnv struct { +} + +func (e *mockClientEnv) Node() retrievalmarket.RetrievalClientNode { + return nil +} + +func (e *mockClientEnv) OpenDataTransfer(ctx context.Context, to peer.ID, proposal *retrievalmarket.DealProposal, legacy bool) (datatransfer.ChannelID, error) { + return datatransfer.ChannelID{}, nil +} + +func (e *mockClientEnv) SendDataTransferVoucher(_ context.Context, _ datatransfer.ChannelID, _ *retrievalmarket.DealPayment, _ bool) error { + return nil +} + +func (e *mockClientEnv) CloseDataTransfer(_ context.Context, _ datatransfer.ChannelID) error { + return nil +} + +func (e *mockClientEnv) FinalizeBlockstore(ctx context.Context, id retrievalmarket.DealID) error { + return nil +} + +var _ clientstates.ClientDealEnvironment = &mockClientEnv{} + +type mockProviderEnv struct { +} + +func (te *mockProviderEnv) PrepareBlockstore(ctx context.Context, dealID retrievalmarket.DealID, pieceCid cid.Cid) error { + return nil +} + +func (te *mockProviderEnv) Node() retrievalmarket.RetrievalProviderNode { + return nil +} + +func (te *mockProviderEnv) DeleteStore(dealID retrievalmarket.DealID) error { + return nil +} + +func (te *mockProviderEnv) TrackTransfer(deal retrievalmarket.ProviderDealState) error { + return nil +} + +func (te *mockProviderEnv) UntrackTransfer(deal retrievalmarket.ProviderDealState) error { + return nil +} + +func (te *mockProviderEnv) ResumeDataTransfer(_ context.Context, _ datatransfer.ChannelID) error { + return nil +} + +func (te *mockProviderEnv) CloseDataTransfer(_ context.Context, _ datatransfer.ChannelID) error { + return nil +} + +var _ providerstates.ProviderDealEnvironment = &mockProviderEnv{} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/doc.go new file mode 100644 index 00000000000..92e82a269aa --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/doc.go @@ -0,0 +1,9 @@ 
+/* +Package network providers an abstraction over a libp2p host for managing retrieval's Libp2p protocols: + +network.go - defines the interfaces that must be implemented to serve as a retrieval network +deal-stream.go - implements the `RetrievalDealStream` interface, a data stream for retrieval deal traffic only +query-stream.go - implements the `RetrievalQueryStream` interface, a data stream for retrieval query traffic only +libp2p_impl.go - provides the production implementation of the `RetrievalMarketNetwork` interface. +*/ +package network diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/libp2p_impl.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/libp2p_impl.go new file mode 100644 index 00000000000..f7e0ced8be3 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/libp2p_impl.go @@ -0,0 +1,123 @@ +package network + +import ( + "bufio" + "context" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared" +) + +var log = logging.Logger("retrieval_network") +var _ RetrievalMarketNetwork = new(libp2pRetrievalMarketNetwork) + +// Option is an option for configuring the libp2p storage market network +type Option func(*libp2pRetrievalMarketNetwork) + +// RetryParameters changes the default parameters around connection reopening +func RetryParameters(minDuration time.Duration, maxDuration time.Duration, attempts float64, backoffFactor float64) Option { + return func(impl *libp2pRetrievalMarketNetwork) { + impl.retryStream.SetOptions(shared.RetryParameters(minDuration, maxDuration, attempts, backoffFactor)) + } +} + +// SupportedProtocols sets what protocols this network 
instances listens on +func SupportedProtocols(supportedProtocols []protocol.ID) Option { + return func(impl *libp2pRetrievalMarketNetwork) { + impl.supportedProtocols = supportedProtocols + } +} + +// NewFromLibp2pHost constructs a new instance of the RetrievalMarketNetwork from a +// libp2p host +func NewFromLibp2pHost(h host.Host, options ...Option) RetrievalMarketNetwork { + impl := &libp2pRetrievalMarketNetwork{ + host: h, + retryStream: shared.NewRetryStream(h), + supportedProtocols: []protocol.ID{ + retrievalmarket.QueryProtocolID, + retrievalmarket.OldQueryProtocolID, + }, + } + for _, option := range options { + option(impl) + } + return impl +} + +// libp2pRetrievalMarketNetwork transforms the libp2p host interface, which sends and receives +// NetMessage objects, into the graphsync network interface. +// It implements the RetrievalMarketNetwork API. +type libp2pRetrievalMarketNetwork struct { + host host.Host + retryStream *shared.RetryStream + // inbound messages from the network are forwarded to the receiver + receiver RetrievalReceiver + supportedProtocols []protocol.ID +} + +// NewQueryStream creates a new RetrievalQueryStream using the provided peer.ID +func (impl *libp2pRetrievalMarketNetwork) NewQueryStream(id peer.ID) (RetrievalQueryStream, error) { + s, err := impl.retryStream.OpenStream(context.Background(), id, impl.supportedProtocols) + if err != nil { + log.Warn(err) + return nil, err + } + buffered := bufio.NewReaderSize(s, 16) + if s.Protocol() == retrievalmarket.OldQueryProtocolID { + return &oldQueryStream{p: id, rw: s, buffered: buffered}, nil + } + return &queryStream{p: id, rw: s, buffered: buffered}, nil +} + +// SetDelegate sets a RetrievalReceiver to handle stream data +func (impl *libp2pRetrievalMarketNetwork) SetDelegate(r RetrievalReceiver) error { + impl.receiver = r + for _, proto := range impl.supportedProtocols { + impl.host.SetStreamHandler(proto, impl.handleNewQueryStream) + } + return nil +} + +// StopHandlingRequests 
unsets the RetrievalReceiver and would perform any other necessary +// shutdown logic. +func (impl *libp2pRetrievalMarketNetwork) StopHandlingRequests() error { + impl.receiver = nil + for _, proto := range impl.supportedProtocols { + impl.host.RemoveStreamHandler(proto) + } + return nil +} + +func (impl *libp2pRetrievalMarketNetwork) handleNewQueryStream(s network.Stream) { + if impl.receiver == nil { + log.Warn("no receiver set") + s.Reset() // nolint: errcheck,gosec + return + } + remotePID := s.Conn().RemotePeer() + buffered := bufio.NewReaderSize(s, 16) + var qs RetrievalQueryStream + if s.Protocol() == retrievalmarket.OldQueryProtocolID { + qs = &oldQueryStream{remotePID, s, buffered} + } else { + qs = &queryStream{remotePID, s, buffered} + } + impl.receiver.HandleQueryStream(qs) +} + +func (impl *libp2pRetrievalMarketNetwork) ID() peer.ID { + return impl.host.ID() +} + +func (impl *libp2pRetrievalMarketNetwork) AddAddrs(p peer.ID, addrs []ma.Multiaddr) { + impl.host.Peerstore().AddAddrs(p, addrs, 8*time.Hour) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/libp2p_impl_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/libp2p_impl_test.go new file mode 100644 index 00000000000..f4c103f81ee --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/libp2p_impl_test.go @@ -0,0 +1,248 @@ +package network_test + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +type testReceiver struct { + t *testing.T + queryStreamHandler func(network.RetrievalQueryStream) +} + +func (tr *testReceiver) HandleQueryStream(s 
network.RetrievalQueryStream) { + defer s.Close() + if tr.queryStreamHandler != nil { + tr.queryStreamHandler(s) + } +} + +func TestQueryStreamSendReceiveQuery(t *testing.T) { + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old queries": { + receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.RetrievalMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedProtocols([]protocol.ID{retrievalmarket.OldQueryProtocolID})) + } else { + fromNetwork = network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedProtocols([]protocol.ID{retrievalmarket.OldQueryProtocolID})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + qchan := make(chan retrievalmarket.Query) + tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { + readq, err := s.ReadQuery() + require.NoError(t, err) + qchan <- readq + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + // setup query stream host1 --> host 2 + assertQueryReceived(ctx, t, fromNetwork, toHost, qchan) + }) + } +} + +func TestQueryStreamSendReceiveQueryResponse(t *testing.T) { + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old 
queries": { + receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.RetrievalMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedProtocols([]protocol.ID{retrievalmarket.OldQueryProtocolID})) + } else { + fromNetwork = network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedProtocols([]protocol.ID{retrievalmarket.OldQueryProtocolID})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + qchan := make(chan retrievalmarket.QueryResponse) + tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { + q, err := s.ReadQueryResponse() + require.NoError(t, err) + qchan <- q + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + assertQueryResponseReceived(ctx, t, fromNetwork, toHost, qchan) + }) + } +} + +func TestQueryStreamSendReceiveMultipleSuccessful(t *testing.T) { + // send query, read in handler, send response back, read response + ctxBg := context.Background() + td := shared_testutil.NewLibp2pTestData(ctxBg, t) + nw1 := network.NewFromLibp2pHost(td.Host1) + nw2 := network.NewFromLibp2pHost(td.Host2) + require.NoError(t, td.Host1.Connect(ctxBg, peer.AddrInfo{ID: td.Host2.ID()})) + + // host2 gets a query and sends a response + qr := shared_testutil.MakeTestQueryResponse() + done := make(chan bool) + tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { + _, err := s.ReadQuery() + require.NoError(t, err) + + require.NoError(t, s.WriteQueryResponse(qr)) + done <- true + }} + require.NoError(t, nw2.SetDelegate(tr2)) + + ctx, cancel := 
context.WithTimeout(ctxBg, 10*time.Second) + defer cancel() + + qs, err := nw1.NewQueryStream(td.Host2.ID()) + require.NoError(t, err) + + testCid := shared_testutil.GenerateCids(1)[0] + + var resp retrievalmarket.QueryResponse + go require.NoError(t, qs.WriteQuery(retrievalmarket.Query{PayloadCID: testCid})) + resp, err = qs.ReadQueryResponse() + require.NoError(t, err) + + select { + case <-ctx.Done(): + t.Error("response not received") + case <-done: + } + + assert.Equal(t, qr, resp) +} + +func TestLibp2pRetrievalMarketNetwork_StopHandlingRequests(t *testing.T) { + bgCtx := context.Background() + td := shared_testutil.NewLibp2pTestData(bgCtx, t) + + fromNetwork := network.NewFromLibp2pHost(td.Host1, network.RetryParameters(0, 0, 0, 0)) + toNetwork := network.NewFromLibp2pHost(td.Host2) + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + qchan := make(chan retrievalmarket.Query) + tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { + readq, err := s.ReadQuery() + require.NoError(t, err) + qchan <- readq + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + require.NoError(t, toNetwork.StopHandlingRequests()) + + _, err := fromNetwork.NewQueryStream(toHost) + require.Error(t, err, "protocol not supported") +} + +// assertQueryReceived performs the verification that a DealStatusRequest is received +func assertQueryReceived(inCtx context.Context, t *testing.T, fromNetwork network.RetrievalMarketNetwork, toHost peer.ID, qchan chan retrievalmarket.Query) { + ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) + defer cancel() + + qs1, err := fromNetwork.NewQueryStream(toHost) + require.NoError(t, err) + + // send query to host2 + cids := shared_testutil.GenerateCids(2) + q := retrievalmarket.NewQueryV1(cids[0], &cids[1]) + require.NoError(t, qs1.WriteQuery(q)) + + var inq retrievalmarket.Query + select { + case 
<-ctx.Done(): + t.Error("msg not received") + case inq = <-qchan: + } + require.NotNil(t, inq) + assert.Equal(t, q.PayloadCID, inq.PayloadCID) + assert.Equal(t, q.PieceCID, inq.PieceCID) +} + +// assertQueryResponseReceived performs the verification that a DealStatusResponse is received +func assertQueryResponseReceived(inCtx context.Context, t *testing.T, + fromNetwork network.RetrievalMarketNetwork, + toHost peer.ID, + qchan chan retrievalmarket.QueryResponse) { + ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) + defer cancel() + + // setup query stream host1 --> host 2 + qs1, err := fromNetwork.NewQueryStream(toHost) + require.NoError(t, err) + + // send queryresponse to host2 + qr := shared_testutil.MakeTestQueryResponse() + require.NoError(t, qs1.WriteQueryResponse(qr)) + + // read queryresponse + var inqr retrievalmarket.QueryResponse + select { + case <-ctx.Done(): + t.Error("msg not received") + case inqr = <-qchan: + } + + require.NotNil(t, inqr) + assert.Equal(t, qr, inqr) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/network.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/network.go new file mode 100644 index 00000000000..404dea89c9c --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/network.go @@ -0,0 +1,50 @@ +package network + +import ( + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +// These are the required interfaces that must be implemented to send and receive data +// for retrieval queries and deals. + +// RetrievalQueryStream is the API needed to send and receive retrieval query +// data over data-transfer network. 
+type RetrievalQueryStream interface { + ReadQuery() (retrievalmarket.Query, error) + WriteQuery(retrievalmarket.Query) error + ReadQueryResponse() (retrievalmarket.QueryResponse, error) + WriteQueryResponse(retrievalmarket.QueryResponse) error + Close() error + RemotePeer() peer.ID +} + +// RetrievalReceiver is the API for handling data coming in on +// both query and deal streams +type RetrievalReceiver interface { + // HandleQueryStream sends and receives data-transfer data via the + // RetrievalQueryStream provided + HandleQueryStream(RetrievalQueryStream) +} + +// RetrievalMarketNetwork is the API for creating query and deal streams and +// delegating responders to those streams. +type RetrievalMarketNetwork interface { + // NewQueryStream creates a new RetrievalQueryStream implementer using the provided peer.ID + NewQueryStream(peer.ID) (RetrievalQueryStream, error) + + // SetDelegate sets a RetrievalReceiver implementer to handle stream data + SetDelegate(RetrievalReceiver) error + + // StopHandlingRequests unsets the RetrievalReceiver and would perform any other necessary + // shutdown logic. 
+ StopHandlingRequests() error + + // ID returns the peer id of the host for this network + ID() peer.ID + + // AddAddrs adds the given multi-addrs to the peerstore for the passed peer ID + AddAddrs(peer.ID, []ma.Multiaddr) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/old_query_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/old_query_stream.go new file mode 100644 index 00000000000..9b60bd616ed --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/old_query_stream.go @@ -0,0 +1,78 @@ +package network + +import ( + "bufio" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/retrievalmarket/migrations" +) + +type oldQueryStream struct { + p peer.ID + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ RetrievalQueryStream = (*oldQueryStream)(nil) + +func (qs *oldQueryStream) RemotePeer() peer.ID { + return qs.p +} + +func (qs *oldQueryStream) ReadQuery() (retrievalmarket.Query, error) { + var q migrations.Query0 + + if err := q.UnmarshalCBOR(qs.buffered); err != nil { + log.Warn(err) + return retrievalmarket.QueryUndefined, err + + } + + return migrations.MigrateQuery0To1(q), nil +} + +func (qs *oldQueryStream) WriteQuery(newQ retrievalmarket.Query) error { + q := migrations.Query0{ + PayloadCID: newQ.PayloadCID, + QueryParams0: migrations.QueryParams0{ + PieceCID: newQ.PieceCID, + }, + } + + return cborutil.WriteCborRPC(qs.rw, &q) +} + +func (qs *oldQueryStream) ReadQueryResponse() (retrievalmarket.QueryResponse, error) { + var resp migrations.QueryResponse0 + + if err := resp.UnmarshalCBOR(qs.buffered); err != nil { + log.Warn(err) + return retrievalmarket.QueryResponseUndefined, err + } + + return migrations.MigrateQueryResponse0To1(resp), nil +} + +func (qs 
*oldQueryStream) WriteQueryResponse(newQr retrievalmarket.QueryResponse) error { + qr := migrations.QueryResponse0{ + Status: newQr.Status, + PieceCIDFound: newQr.PieceCIDFound, + Size: newQr.Size, + PaymentAddress: newQr.PaymentAddress, + MinPricePerByte: newQr.MinPricePerByte, + MaxPaymentInterval: newQr.MaxPaymentInterval, + MaxPaymentIntervalIncrease: newQr.MaxPaymentIntervalIncrease, + Message: newQr.Message, + UnsealPrice: newQr.UnsealPrice, + } + return cborutil.WriteCborRPC(qs.rw, &qr) +} + +func (qs *oldQueryStream) Close() error { + return qs.rw.Close() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/query_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/query_stream.go new file mode 100644 index 00000000000..a46d63ef03b --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/network/query_stream.go @@ -0,0 +1,59 @@ +package network + +import ( + "bufio" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" +) + +type queryStream struct { + p peer.ID + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ RetrievalQueryStream = (*queryStream)(nil) + +func (qs *queryStream) ReadQuery() (retrievalmarket.Query, error) { + var q retrievalmarket.Query + + if err := q.UnmarshalCBOR(qs.buffered); err != nil { + log.Warn(err) + return retrievalmarket.QueryUndefined, err + + } + + return q, nil +} + +func (qs *queryStream) RemotePeer() peer.ID { + return qs.p +} + +func (qs *queryStream) WriteQuery(q retrievalmarket.Query) error { + return cborutil.WriteCborRPC(qs.rw, &q) +} + +func (qs *queryStream) ReadQueryResponse() (retrievalmarket.QueryResponse, error) { + var resp retrievalmarket.QueryResponse + + if err := resp.UnmarshalCBOR(qs.buffered); err != nil { + log.Warn(err) + return retrievalmarket.QueryResponseUndefined, err + } + 
+ return resp, nil +} + +func (qs *queryStream) WriteQueryResponse(qr retrievalmarket.QueryResponse) error { + return cborutil.WriteCborRPC(qs.rw, &qr) +} + +func (qs *queryStream) Close() error { + return qs.rw.Close() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/nodes.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/nodes.go new file mode 100644 index 00000000000..6cb91c9009f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/nodes.go @@ -0,0 +1,55 @@ +package retrievalmarket + +import ( + "context" + + "github.com/ipfs/go-cid" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +// RetrievalClientNode are the node dependencies for a RetrievalClient +type RetrievalClientNode interface { + GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) + + // GetOrCreatePaymentChannel sets up a new payment channel if one does not exist + // between a client and a miner and ensures the client has the given amount of funds available in the channel + GetOrCreatePaymentChannel(ctx context.Context, clientAddress, minerAddress address.Address, + clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) + + // CheckAvailableFunds returns the amount of current and incoming funds in a channel + CheckAvailableFunds(ctx context.Context, paymentChannel address.Address) (ChannelAvailableFunds, error) + + // Allocate late creates a lane within a payment channel so that calls to + // CreatePaymentVoucher will automatically make vouchers only for the difference + // in total + AllocateLane(ctx context.Context, paymentChannel address.Address) (uint64, error) + + // CreatePaymentVoucher creates a new payment voucher in the given lane for a + // given 
payment channel so that all the payment vouchers in the lane add up + // to the given amount (so the payment voucher will be for the difference) + CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, amount abi.TokenAmount, + lane uint64, tok shared.TipSetToken) (*paychtypes.SignedVoucher, error) + + // WaitForPaymentChannelReady just waits for the payment channel's pending operations to complete + WaitForPaymentChannelReady(ctx context.Context, waitSentinel cid.Cid) (address.Address, error) + + // GetKnownAddresses gets any on known multiaddrs for a given address, so we can add to the peer store + GetKnownAddresses(ctx context.Context, p RetrievalPeer, tok shared.TipSetToken) ([]ma.Multiaddr, error) +} + +// RetrievalProviderNode are the node dependencies for a RetrievalProvider +type RetrievalProviderNode interface { + GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) + + // returns the worker address associated with a miner + GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) + SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paychtypes.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) + + GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (PricingInput, error) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/provider.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/provider.go new file mode 100644 index 00000000000..f2926aa0802 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/provider.go @@ -0,0 +1,40 @@ +package retrievalmarket + +import ( + "context" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +// ProviderSubscriber is a callback that is registered to listen for retrieval events on a provider +type ProviderSubscriber func(event ProviderEvent, 
state ProviderDealState) + +// RetrievalProvider is an interface by which a provider configures their +// retrieval operations and monitors deals received and process +type RetrievalProvider interface { + // Start begins listening for deals on the given host + Start(ctx context.Context) error + + // OnReady registers a listener for when the provider comes on line + OnReady(shared.ReadyFunc) + + // Stop stops handling incoming requests + Stop() error + + // SetAsk sets the retrieval payment parameters that this miner will accept + SetAsk(ask *Ask) + + // GetAsk returns the retrieval providers pricing information + GetAsk() *Ask + + // SubscribeToEvents listens for events that happen related to client retrievals + SubscribeToEvents(subscriber ProviderSubscriber) Unsubscribe + + ListDeals() map[ProviderDealIdentifier]ProviderDealState +} + +// AskStore is an interface which provides access to a persisted retrieval Ask +type AskStore interface { + GetAsk() *Ask + SetAsk(ask *Ask) error +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/retrieval_restart_integration_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/retrieval_restart_integration_test.go new file mode 100644 index 00000000000..7c1fc4a991e --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/retrieval_restart_integration_test.go @@ -0,0 +1,315 @@ +package retrievalmarket_test + +import ( + "context" + "testing" + "time" + + "github.com/ipfs/go-datastore" + logger "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-data-transfer/channelmonitor" + dtimpl "github.com/filecoin-project/go-data-transfer/impl" + dtnet "github.com/filecoin-project/go-data-transfer/network" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + testnodes2 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" + "github.com/filecoin-project/go-fil-markets/shared_testutil" + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness/dependencies" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" +) + +var log = logger.Logger("restart_test") + +var noOpDelay = testnodes.DelayFakeCommonNode{} + +// TODO +// TEST CONNECTION BOUNCE FOR ALL MEANINGFUL STATES OF THE CLIENT AND PROVIDER DEAL LIFECYCLE. +// CURRENTLY, WE ONLY TEST THIS FOR THE DEALSTATUS ONGOING STATE. + +// TestBounceConnectionDealTransferOngoing tests that when the the connection is +// broken and then restarted during deal data transfer for an ongoing deal, the data transfer will resume and the deal will +// complete successfully. +func TestBounceConnectionDealTransferOngoing(t *testing.T) { + bgCtx := context.Background() + logger.SetLogLevel("restart_test", "debug") + //logger.SetLogLevel("dt-impl", "debug") + //logger.SetLogLevel("dt-chanmon", "debug") + //logger.SetLogLevel("dt_graphsync", "debug") + //logger.SetLogLevel("markets-rtvl", "debug") + //logger.SetLogLevel("markets-rtvl-reval", "debug") + + tcs := map[string]struct { + unSealPrice abi.TokenAmount + pricePerByte abi.TokenAmount + paymentInterval uint64 + paymentIntervalIncrease uint64 + voucherAmts []abi.TokenAmount + maxVoucherAmt abi.TokenAmount + }{ + "non-zero unseal, non zero price per byte": { + unSealPrice: abi.NewTokenAmount(1000), + pricePerByte: abi.NewTokenAmount(1000), + paymentInterval: uint64(10000), + paymentIntervalIncrease: uint64(1000), + maxVoucherAmt: abi.NewTokenAmount(19959000), + }, + + "zero unseal, non-zero price per byte": { + unSealPrice: big.Zero(), + pricePerByte: abi.NewTokenAmount(1000), + paymentInterval: uint64(10000), + paymentIntervalIncrease: uint64(1000), + 
maxVoucherAmt: abi.NewTokenAmount(19958000), + }, + + "zero unseal, zero price per byte": { + unSealPrice: big.Zero(), + pricePerByte: big.Zero(), + paymentInterval: uint64(0), + paymentIntervalIncrease: uint64(0), + maxVoucherAmt: abi.NewTokenAmount(0), + }, + + "non-zero unseal, zero price per byte": { + unSealPrice: abi.NewTokenAmount(1000), + pricePerByte: big.Zero(), + maxVoucherAmt: abi.NewTokenAmount(1000), + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + dtClientNetRetry := dtnet.RetryParameters(time.Second, time.Second, 5, 1) + restartConf := dtimpl.ChannelRestartConfig(channelmonitor.Config{ + AcceptTimeout: 100 * time.Millisecond, + RestartBackoff: 100 * time.Millisecond, + RestartDebounce: 100 * time.Millisecond, + MaxConsecutiveRestarts: 5, + CompleteTimeout: 100 * time.Millisecond, + }) + td := shared_testutil.NewLibp2pTestData(bgCtx, t) + td.DTNet1 = dtnet.NewFromLibp2pHost(td.Host1, dtClientNetRetry) + depGen := dependencies.NewDepGenerator() + depGen.ClientNewDataTransfer = func(ds datastore.Batching, dir string, transferNetwork dtnet.DataTransferNetwork, transport datatransfer.Transport) (datatransfer.Manager, error) { + return dtimpl.NewDataTransfer(ds, transferNetwork, transport, restartConf) + } + deps := depGen.New(t, bgCtx, td, testnodes.NewStorageMarketState(), "", noOpDelay, noOpDelay) + providerNode := testnodes2.NewTestRetrievalProviderNode() + sa := testnodes2.NewTestSectorAccessor() + pieceStore := shared_testutil.NewTestPieceStore() + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa) + deps.DagStore = dagStore + + sh := testharness.NewHarnessWithTestData(t, deps.TestData, deps, true, false) + + // do a storage deal + storageClientSeenDeal := doStorage(t, bgCtx, sh) + ctxTimeout, canc := context.WithTimeout(bgCtx, 5*time.Second) + defer canc() + + // create a retrieval test harness + params := retrievalmarket.Params{ + UnsealPrice: tc.unSealPrice, + PricePerByte: tc.pricePerByte, + PaymentInterval: 
tc.paymentInterval, + PaymentIntervalIncrease: tc.paymentIntervalIncrease, + } + rh := newRetrievalHarnessWithDeps(ctxTimeout, t, sh, storageClientSeenDeal, providerNode, sa, pieceStore, dagStore, params) + clientHost := rh.TestDataNet.Host1.ID() + providerHost := rh.TestDataNet.Host2.ID() + + // Bounce connection after this many bytes have been queued for sending + bounceConnectionAt := map[uint64]bool{ + 1000: false, + 3000: false, + 5000: false, + 7000: false, + 9000: false, + } + + sh.DTProvider.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { + if event.Code == datatransfer.DataQueuedProgress { + log.Debugf("DataQueuedProgress %d", channelState.Queued()) + // Check if enough bytes have been queued that the connection + // should be bounced + for at, already := range bounceConnectionAt { + if channelState.Queued() > at && !already { + bounceConnectionAt[at] = true + + // Break the connection + queued := channelState.Queued() + sent := channelState.Sent() + t.Logf("breaking connection at queue %d sent %d bytes", queued, sent) + rh.TestDataNet.MockNet.DisconnectPeers(clientHost, providerHost) + rh.TestDataNet.MockNet.UnlinkPeers(clientHost, providerHost) + + go func() { + time.Sleep(100 * time.Millisecond) + t.Logf("restoring connection at queue %d sent %d bytes", queued, sent) + rh.TestDataNet.MockNet.LinkPeers(clientHost, providerHost) + }() + } + } + } + if event.Code == datatransfer.DataSent { + log.Debugf("DataSent %d", channelState.Sent()) + } + if event.Code == datatransfer.DataSentProgress { + log.Debugf("DataSentProgress %d", channelState.Sent()) + } + }) + sh.DTClient.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) { + if event.Code == datatransfer.DataReceived { + log.Debugf("DataReceived %d", channelState.Received()) + } + if event.Code == datatransfer.DataReceivedProgress { + log.Debugf("DataReceivedProgress %d", channelState.Received()) + } + }) + + checkRetrieve(t, 
bgCtx, rh, sh, tc.voucherAmts) + require.Equal(t, tc.maxVoucherAmt, rh.ProviderNode.MaxReceivedVoucher()) + }) + } +} + +// TestBounceConnectionDealTransferUnsealing tests that when the the connection +// is broken and then restarted during unsealing, the data transfer will resume +// and the deal will complete successfully. +func TestBounceConnectionDealTransferUnsealing(t *testing.T) { + bgCtx := context.Background() + //logger.SetLogLevel("dt-chanmon", "debug") + //logger.SetLogLevel("retrieval", "debug") + //logger.SetLogLevel("retrievalmarket_impl", "debug") + logger.SetLogLevel("restart_test", "debug") + //logger.SetLogLevel("markets-rtvl-reval", "debug") + //logger.SetLogLevel("graphsync", "debug") + //logger.SetLogLevel("gs-traversal", "debug") + //logger.SetLogLevel("gs-executor", "debug") + + beforeRestoringConnection := true + afterRestoringConnection := !beforeRestoringConnection + tcs := []struct { + name string + finishUnseal bool + }{{ + name: "finish unseal before restoring connection", + finishUnseal: beforeRestoringConnection, + }, { + name: "finish unseal after restoring connection", + finishUnseal: afterRestoringConnection, + }} + + for _, tc := range tcs { + tc := tc + t.Run(tc.name, func(t *testing.T) { + restartComplete := make(chan struct{}) + onRestartComplete := func(_ datatransfer.ChannelID) { + close(restartComplete) + } + + dtClientNetRetry := dtnet.RetryParameters(time.Second, time.Second, 5, 1) + restartConf := dtimpl.ChannelRestartConfig(channelmonitor.Config{ + AcceptTimeout: 100 * time.Millisecond, + RestartBackoff: 100 * time.Millisecond, + RestartDebounce: 100 * time.Millisecond, + MaxConsecutiveRestarts: 5, + CompleteTimeout: 100 * time.Millisecond, + OnRestartComplete: onRestartComplete, + }) + td := shared_testutil.NewLibp2pTestData(bgCtx, t) + td.DTNet1 = dtnet.NewFromLibp2pHost(td.Host1, dtClientNetRetry) + depGen := dependencies.NewDepGenerator() + depGen.ClientNewDataTransfer = func(ds datastore.Batching, dir string, 
transferNetwork dtnet.DataTransferNetwork, transport datatransfer.Transport) (datatransfer.Manager, error) { + return dtimpl.NewDataTransfer(ds, transferNetwork, transport, restartConf) + } + deps := depGen.New(t, bgCtx, td, testnodes.NewStorageMarketState(), "", noOpDelay, noOpDelay) + providerNode := testnodes2.NewTestRetrievalProviderNode() + sa := testnodes2.NewTestSectorAccessor() + pieceStore := shared_testutil.NewTestPieceStore() + dagStore := tut.NewMockDagStoreWrapper(pieceStore, sa) + deps.DagStore = dagStore + + sh := testharness.NewHarnessWithTestData(t, td, deps, true, false) + + // do a storage deal + storageClientSeenDeal := doStorage(t, bgCtx, sh) + ctxTimeout, canc := context.WithTimeout(bgCtx, 5*time.Second) + defer canc() + + // create a retrieval test harness + maxVoucherAmt := abi.NewTokenAmount(19959000) + params := retrievalmarket.Params{ + UnsealPrice: abi.NewTokenAmount(1000), + PricePerByte: abi.NewTokenAmount(1000), + PaymentInterval: uint64(10000), + PaymentIntervalIncrease: uint64(1000), + } + rh := newRetrievalHarnessWithDeps(ctxTimeout, t, sh, storageClientSeenDeal, providerNode, sa, pieceStore, dagStore, params) + clientHost := rh.TestDataNet.Host1.ID() + providerHost := rh.TestDataNet.Host2.ID() + + // Pause unsealing + rh.SectorAccessor.PauseUnseal() + + firstPayRcvd := false + rh.Provider.SubscribeToEvents(func(event retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) { + // When the provider receives the first payment from the + // client (the payment for unsealing), the provider moves + // to the unsealing state + if event != retrievalmarket.ProviderEventPaymentReceived || firstPayRcvd { + return + } + + firstPayRcvd = true + + log.Debugf("breaking connection at %s", retrievalmarket.ProviderEvents[event]) + rh.TestDataNet.MockNet.DisconnectPeers(clientHost, providerHost) + rh.TestDataNet.MockNet.UnlinkPeers(clientHost, providerHost) + + go func() { + // Simulate unsealing delay + time.Sleep(50 * 
time.Millisecond) + + // If unsealing should finish before restoring the connection + if tc.finishUnseal == beforeRestoringConnection { + // Finish unsealing + log.Debugf("finish unseal") + rh.SectorAccessor.FinishUnseal() + time.Sleep(20 * time.Millisecond) + } + + // Restore the connection + log.Debugf("restoring connection") + rh.TestDataNet.MockNet.LinkPeers(clientHost, providerHost) + + // If unsealing should finish after restoring the connection + if tc.finishUnseal == afterRestoringConnection { + // Wait for the Restart message to be sent and + // acknowledged + select { + case <-ctxTimeout.Done(): + return + case <-restartComplete: + } + + // Finish unsealing + time.Sleep(50 * time.Millisecond) + log.Debugf("finish unseal") + rh.SectorAccessor.FinishUnseal() + } + }() + }) + + checkRetrieve(t, bgCtx, rh, sh, nil) + require.Equal(t, maxVoucherAmt, rh.ProviderNode.MaxReceivedVoucher()) + }) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/sectoraccessor.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/sectoraccessor.go new file mode 100644 index 00000000000..9447441012f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/sectoraccessor.go @@ -0,0 +1,14 @@ +package retrievalmarket + +import ( + "context" + "io" + + "github.com/filecoin-project/go-state-types/abi" +) + +// SectorAccessor provides methods to unseal and get the seal status of a sector +type SectorAccessor interface { + UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) + IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/storage_retrieval_integration_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/storage_retrieval_integration_test.go new file mode 100644 index 
00000000000..f08108730e0 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/storage_retrieval_integration_test.go @@ -0,0 +1,601 @@ +package retrievalmarket_test + +import ( + "bytes" + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipld/go-car" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" + testnodes2 "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + "github.com/filecoin-project/go-fil-markets/shared_testutil" + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness/dependencies" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" +) + +func TestStorageRetrieval(t *testing.T) { + bgCtx := context.Background() + + tcs := map[string]struct { + unSealPrice abi.TokenAmount + pricePerByte abi.TokenAmount + 
paymentInterval uint64 + paymentIntervalIncrease uint64 + voucherAmts []abi.TokenAmount + }{ + + "non-zero unseal, zero price per byte": { + unSealPrice: abi.NewTokenAmount(1000), + pricePerByte: big.Zero(), + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(1000)}, + }, + + "zero unseal, non-zero price per byte": { + unSealPrice: big.Zero(), + pricePerByte: abi.NewTokenAmount(1000), + paymentInterval: uint64(10000), + paymentIntervalIncrease: uint64(1000), + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10174000), abi.NewTokenAmount(19958000)}, + }, + + "zero unseal, zero price per byte": { + unSealPrice: big.Zero(), + pricePerByte: big.Zero(), + paymentInterval: uint64(0), + paymentIntervalIncrease: uint64(0), + voucherAmts: nil, + }, + + "non-zero unseal, non zero prices per byte": { + unSealPrice: abi.NewTokenAmount(1000), + pricePerByte: abi.NewTokenAmount(1000), + paymentInterval: uint64(10000), + paymentIntervalIncrease: uint64(1000), + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(1000), abi.NewTokenAmount(10175000), abi.NewTokenAmount(19959000)}, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + providerNode := testnodes2.NewTestRetrievalProviderNode() + sa := testnodes2.NewTestSectorAccessor() + pieceStore := tut.NewTestPieceStore() + deps := setupDepsWithDagStore(bgCtx, t, sa, pieceStore) + dagStore := deps.DagStore.(*tut.MockDagStoreWrapper) + sh := testharness.NewHarnessWithTestData(t, deps.TestData, deps, true, false) + + storageProviderSeenDeal := doStorage(t, bgCtx, sh) + ctxTimeout, canc := context.WithTimeout(bgCtx, 25*time.Second) + defer canc() + + params := retrievalmarket.Params{ + UnsealPrice: tc.unSealPrice, + PricePerByte: tc.pricePerByte, + PaymentInterval: tc.paymentInterval, + PaymentIntervalIncrease: tc.paymentIntervalIncrease, + } + rh := newRetrievalHarnessWithDeps(ctxTimeout, t, sh, storageProviderSeenDeal, providerNode, sa, pieceStore, dagStore, params) + + checkRetrieve(t, bgCtx, rh, sh, 
tc.voucherAmts) + }) + } +} + +func TestOfflineStorageRetrieval(t *testing.T) { + bgCtx := context.Background() + + tcs := map[string]struct { + unSealPrice abi.TokenAmount + pricePerByte abi.TokenAmount + paymentInterval uint64 + paymentIntervalIncrease uint64 + voucherAmts []abi.TokenAmount + }{ + + "non-zero unseal, zero price per byte": { + unSealPrice: abi.NewTokenAmount(1000), + pricePerByte: big.Zero(), + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(1000)}, + }, + + "zero unseal, non-zero price per byte": { + unSealPrice: big.Zero(), + pricePerByte: abi.NewTokenAmount(1000), + paymentInterval: uint64(10000), + paymentIntervalIncrease: uint64(1000), + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10174000), abi.NewTokenAmount(19958000)}, + }, + + "zero unseal, zero price per byte": { + unSealPrice: big.Zero(), + pricePerByte: big.Zero(), + paymentInterval: uint64(0), + paymentIntervalIncrease: uint64(0), + voucherAmts: nil, + }, + + "non-zero unseal, non zero prices per byte": { + unSealPrice: abi.NewTokenAmount(1000), + pricePerByte: abi.NewTokenAmount(1000), + paymentInterval: uint64(10000), + paymentIntervalIncrease: uint64(1000), + voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(1000), abi.NewTokenAmount(10175000), abi.NewTokenAmount(19959000)}, + }, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + // offline storage + providerNode := testnodes2.NewTestRetrievalProviderNode() + sa := testnodes2.NewTestSectorAccessor() + pieceStore := tut.NewTestPieceStore() + deps := setupDepsWithDagStore(bgCtx, t, sa, pieceStore) + dagStore := deps.DagStore.(*tut.MockDagStoreWrapper) + sh := testharness.NewHarnessWithTestData(t, deps.TestData, deps, true, false) + + // start and wait for client/provider + ctx, cancel := context.WithTimeout(bgCtx, 5*time.Second) + defer cancel() + shared_testutil.StartAndWaitForReady(ctx, t, sh.Provider) + shared_testutil.StartAndWaitForReady(ctx, t, sh.Client) + + // Do a Selective CARv1 traversal on 
the CARv2 file to get a deterministic CARv1 that we can import on the miner side. + sc := car.NewSelectiveCar(ctx, sh.Data, []car.Dag{{Root: sh.PayloadCid, Selector: selectorparse.CommonSelector_ExploreAllRecursively}}) + prepared, err := sc.Prepare() + require.NoError(t, err) + carBuf := new(bytes.Buffer) + require.NoError(t, prepared.Write(carBuf)) + + commP, size, err := clientutils.CommP(ctx, sh.Data, &storagemarket.DataRef{ + // hacky but need it for now because if it's manual, we wont get a CommP. + TransferType: storagemarket.TTGraphsync, + Root: sh.PayloadCid, + }, 2<<29) + require.NoError(t, err) + + // propose deal + dataRef := &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + Root: sh.PayloadCid, + PieceCid: &commP, + PieceSize: size, + } + result := sh.ProposeStorageDeal(t, dataRef, false, false) + proposalCid := result.ProposalCid + + wg := sync.WaitGroup{} + sh.WaitForClientEvent(&wg, storagemarket.ClientEventDataTransferComplete) + sh.WaitForProviderEvent(&wg, storagemarket.ProviderEventDataRequested) + waitGroupWait(ctx, &wg) + + cd, err := sh.Client.GetLocalDeal(ctx, proposalCid) + assert.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ = sh.Client.GetLocalDeal(ctx, proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 1*time.Second, 100*time.Millisecond, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + providerDeals, err := sh.Provider.ListLocalDeals() + assert.NoError(t, err) + pd := providerDeals[0] + assert.True(t, pd.ProposalCid.Equals(proposalCid)) + shared_testutil.AssertDealState(t, storagemarket.StorageDealWaitingForData, pd.State) + + // provider imports deal + err = sh.Provider.ImportDataForDeal(ctx, pd.ProposalCid, carBuf) + require.NoError(t, err) + + // wait for event signalling deal completion. 
+ sh.WaitForClientEvent(&wg, storagemarket.ClientEventDealExpired) + sh.WaitForProviderEvent(&wg, storagemarket.ProviderEventDealExpired) + waitGroupWait(ctx, &wg) + + // client asserts expiry + require.Eventually(t, func() bool { + cd, _ = sh.Client.GetLocalDeal(ctx, proposalCid) + return cd.State == storagemarket.StorageDealExpired + }, 5*time.Second, 100*time.Millisecond) + + // provider asserts expiry + require.Eventually(t, func() bool { + providerDeals, _ = sh.Provider.ListLocalDeals() + pd = providerDeals[0] + return pd.State == storagemarket.StorageDealExpired + }, 5*time.Second, 100*time.Millisecond) + + t.Log("Offline storage complete") + + // Retrieve + ctxTimeout, canc := context.WithTimeout(bgCtx, 25*time.Second) + defer canc() + params := retrievalmarket.Params{ + UnsealPrice: tc.unSealPrice, + PricePerByte: tc.pricePerByte, + PaymentInterval: tc.paymentInterval, + PaymentIntervalIncrease: tc.paymentIntervalIncrease, + } + rh := newRetrievalHarnessWithDeps(ctxTimeout, t, sh, cd, providerNode, sa, pieceStore, dagStore, params) + + checkRetrieve(t, bgCtx, rh, sh, tc.voucherAmts) + }) + } +} + +func checkRetrieve(t *testing.T, bgCtx context.Context, rh *retrievalHarness, sh *testharness.StorageHarness, vAmts []abi.TokenAmount) { + clientDealStateChan := make(chan retrievalmarket.ClientDealState) + + rh.Client.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) { + switch state.Status { + case retrievalmarket.DealStatusCompleted: + clientDealStateChan <- state + default: + msg := ` + Client: + Event: %s + Status: %s + TotalReceived: %d + BytesPaidFor: %d + CurrentInterval: %d + TotalFunds: %s + Message: %s + ` + + t.Logf(msg, retrievalmarket.ClientEvents[event], retrievalmarket.DealStatuses[state.Status], state.TotalReceived, state.BytesPaidFor, state.CurrentInterval, + state.TotalFunds.String(), state.Message) + } + }) + + providerDealStateChan := make(chan retrievalmarket.ProviderDealState) + 
rh.Provider.SubscribeToEvents(func(event retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) {
		switch state.Status {
		case retrievalmarket.DealStatusCompleted:
			// Forward the terminal state to the test body; this send blocks
			// until the select below receives it.
			providerDealStateChan <- state
		default:
			// Non-terminal provider events are only logged for debugging.
			msg := `
			Provider:
			Event:           %s
			Status:          %s
			TotalSent:       %d
			FundsReceived:   %s
			Message:		 %s
			CurrentInterval: %d
			`
			t.Logf(msg, retrievalmarket.ProviderEvents[event], retrievalmarket.DealStatuses[state.Status], state.TotalSent, state.FundsReceived.String(), state.Message,
				state.CurrentInterval)
		}
	})

	// Kick off the retrieval; fsize is the size of the transferred test file.
	fsize := doRetrieve(t, bgCtx, rh, sh, vAmts)

	ctxTimeout, cancel := context.WithTimeout(bgCtx, 10*time.Second)
	defer cancel()

	// verify that client subscribers will be notified of state changes
	var clientDealState retrievalmarket.ClientDealState
	select {
	case <-ctxTimeout.Done():
		t.Error("deal never completed")
		t.FailNow()
	case clientDealState = <-clientDealStateChan:
	}

	// The provider must independently report completion within its own 10s window.
	ctxTimeout, cancel = context.WithTimeout(bgCtx, 10*time.Second)
	defer cancel()
	var providerDealState retrievalmarket.ProviderDealState
	select {
	case <-ctxTimeout.Done():
		t.Error("provider never saw completed deal")
		t.FailNow()
	case providerDealState = <-providerDealStateChan:
	}

	require.Equal(t, retrievalmarket.DealStatusCompleted, providerDealState.Status)
	require.Equal(t, retrievalmarket.DealStatusCompleted, clientDealState.Status)

	rh.ClientNode.VerifyExpectations(t)

	// Confirm every byte of the payload landed in the client's blockstore.
	sh.TestData.VerifyFileTransferredIntoStore(t, cidlink.Link{Cid: sh.PayloadCid}, rh.BlockstoreAccessor.Blockstore, uint64(fsize))
}

// waitGroupWait calls wg.Wait while respecting context cancellation
func waitGroupWait(ctx context.Context, wg *sync.WaitGroup) {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// Whichever fires first wins. On ctx cancellation the waiting goroutine
	// keeps running until the WaitGroup drains (acceptable in tests).
	select {
	case <-ctx.Done():
	case <-done:
	}
}

// Compile-time assertion that fakeDTValidator satisfies datatransfer.RequestValidator.
var _ datatransfer.RequestValidator = (*fakeDTValidator)(nil)

// retrievalHarness bundles everything a retrieval test needs: the retrieval
// client/provider pair, their test nodes, and recorders capturing
// payment-channel activity for later assertions.
type retrievalHarness struct {
	Ctx   context.Context
	Epoch abi.ChainEpoch
Client retrievalmarket.RetrievalClient + ClientNode *testnodes2.TestRetrievalClientNode + Provider retrievalmarket.RetrievalProvider + ProviderNode *testnodes2.TestRetrievalProviderNode + BlockstoreAccessor *tut.TestRetrievalBlockstoreAccessor + SectorAccessor *testnodes2.TestSectorAccessor + PieceStore piecestore.PieceStore + ExpPaych, NewLaneAddr *address.Address + ExpPaychAmt, ActualPaychAmt *abi.TokenAmount + ExpVoucher, ActualVoucher *paych.SignedVoucher + RetrievalParams retrievalmarket.Params + + TestDataNet *shared_testutil.Libp2pTestData +} + +func setupDepsWithDagStore(ctx context.Context, t *testing.T, sa *testnodes2.TestSectorAccessor, pieceStore *tut.TestPieceStore) *dependencies.StorageDependencies { + smState := testnodes.NewStorageMarketState() + td := shared_testutil.NewLibp2pTestData(ctx, t) + deps := dependencies.NewDependenciesWithTestData(t, ctx, td, smState, "", testnodes.DelayFakeCommonNode{}, testnodes.DelayFakeCommonNode{}) + deps.DagStore = tut.NewMockDagStoreWrapper(pieceStore, sa) + return deps +} + +func newRetrievalHarnessWithDeps( + ctx context.Context, + t *testing.T, + sh *testharness.StorageHarness, + deal storagemarket.ClientDeal, + providerNode *testnodes2.TestRetrievalProviderNode, + sa *testnodes2.TestSectorAccessor, + pieceStore *tut.TestPieceStore, + dagStore *tut.MockDagStoreWrapper, + params ...retrievalmarket.Params, +) *retrievalHarness { + var newPaychAmt abi.TokenAmount + paymentChannelRecorder := func(client, miner address.Address, amt abi.TokenAmount) { + newPaychAmt = amt + } + + var newLaneAddr address.Address + laneRecorder := func(paymentChannel address.Address) { + newLaneAddr = paymentChannel + } + + var newVoucher paych.SignedVoucher + paymentVoucherRecorder := func(v *paych.SignedVoucher) { + newVoucher = *v + } + + cids := tut.GenerateCids(2) + clientPaymentChannel, err := address.NewActorAddress([]byte("a")) + + expectedVoucher := tut.MakeTestSignedVoucher() + require.NoError(t, err) + clientNode := 
testnodes2.NewTestRetrievalClientNode(testnodes2.TestRetrievalClientNodeParams{ + Lane: expectedVoucher.Lane, + PayCh: clientPaymentChannel, + Voucher: expectedVoucher, + PaymentChannelRecorder: paymentChannelRecorder, + AllocateLaneRecorder: laneRecorder, + PaymentVoucherRecorder: paymentVoucherRecorder, + CreatePaychCID: cids[0], + AddFundsCID: cids[1], + IntegrationTest: true, + }) + + nw1 := rmnet.NewFromLibp2pHost(sh.TestData.Host1, rmnet.RetryParameters(0, 0, 0, 0)) + clientDs := namespace.Wrap(sh.TestData.Ds1, datastore.NewKey("/retrievals/client")) + ba := tut.NewTestRetrievalBlockstoreAccessor() + client, err := retrievalimpl.NewClient(nw1, sh.DTClient, clientNode, sh.PeerResolver, clientDs, ba) + require.NoError(t, err) + tut.StartAndWaitForReady(ctx, t, client) + payloadCID := deal.DataRef.Root + providerPaymentAddr := deal.MinerWorker + + // Get the data passed to the sealing code when the last deal completed. + // This is the padded CAR file. + carData := sh.ProviderNode.LastOnDealCompleteBytes + expectedPiece := deal.Proposal.PieceCID + sectorID := abi.SectorNumber(100000) + offset := abi.PaddedPieceSize(1000) + pieceInfo := piecestore.PieceInfo{ + PieceCID: expectedPiece, + Deals: []piecestore.DealInfo{ + { + SectorID: sectorID, + Offset: offset, + Length: abi.UnpaddedPieceSize(uint64(len(carData))).Padded(), + }, + }, + } + sa.ExpectUnseal(sectorID, offset.Unpadded(), abi.UnpaddedPieceSize(uint64(len(carData))), carData) + + // clear out provider blockstore + allCids, err := sh.TestData.Bs2.AllKeysChan(sh.Ctx) + require.NoError(t, err) + for c := range allCids { + err = sh.TestData.Bs2.DeleteBlock(ctx, c) + require.NoError(t, err) + } + + nw2 := rmnet.NewFromLibp2pHost(sh.TestData.Host2, rmnet.RetryParameters(0, 0, 0, 0)) + cidInfo := piecestore.CIDInfo{ + PieceBlockLocations: []piecestore.PieceBlockLocation{ + { + PieceCID: expectedPiece, + }, + }, + } + pieceStore.ExpectCID(payloadCID, cidInfo) + pieceStore.ExpectPiece(expectedPiece, pieceInfo) + 
dagStore.AddBlockToPieceIndex(payloadCID, expectedPiece) + providerDs := namespace.Wrap(sh.TestData.Ds2, datastore.NewKey("/retrievals/provider")) + + var p retrievalmarket.Params + if len(params) == 0 { + p = retrievalmarket.Params{ + PricePerByte: abi.NewTokenAmount(1000), + PaymentInterval: uint64(10000), + PaymentIntervalIncrease: uint64(1000), + UnsealPrice: big.Zero(), + } + } else { + p = params[0] + } + + priceFunc := func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + ask := retrievalmarket.Ask{} + ask.PaymentInterval = p.PaymentInterval + ask.PaymentIntervalIncrease = p.PaymentIntervalIncrease + ask.PricePerByte = p.PricePerByte + ask.UnsealPrice = p.UnsealPrice + + return ask, nil + } + + provider, err := retrievalimpl.NewProvider( + providerPaymentAddr, providerNode, sa, nw2, pieceStore, + sh.DagStore, sh.DTProvider, providerDs, priceFunc) + require.NoError(t, err) + tut.StartAndWaitForReady(ctx, t, provider) + + return &retrievalHarness{ + Ctx: ctx, + Client: client, + ClientNode: clientNode, + Epoch: sh.Epoch, + ExpPaych: &clientPaymentChannel, + NewLaneAddr: &newLaneAddr, + ActualPaychAmt: &newPaychAmt, + ExpVoucher: expectedVoucher, + ActualVoucher: &newVoucher, + Provider: provider, + ProviderNode: providerNode, + SectorAccessor: sa, + BlockstoreAccessor: ba, + PieceStore: sh.PieceStore, + RetrievalParams: p, + TestDataNet: sh.TestData, + } +} + +type fakeDTValidator struct{} + +func (v *fakeDTValidator) ValidatePush(isRestart bool, _ datatransfer.ChannelID, sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + return nil, nil +} + +func (v *fakeDTValidator) ValidatePull(isRestart bool, _ datatransfer.ChannelID, receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + return nil, nil +} + +func doStorage(t *testing.T, ctx context.Context, sh 
*testharness.StorageHarness) storagemarket.ClientDeal { + shared_testutil.StartAndWaitForReady(ctx, t, sh.Client) + shared_testutil.StartAndWaitForReady(ctx, t, sh.Provider) + + // set up a subscriber + providerDealChan := make(chan storagemarket.MinerDeal) + subscriber := func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { + providerDealChan <- deal + } + _ = sh.Provider.SubscribeToEvents(subscriber) + + clientDealChan := make(chan storagemarket.ClientDeal) + clientSubscriber := func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { + clientDealChan <- deal + } + _ = sh.Client.SubscribeToEvents(clientSubscriber) + + // set ask price where we'll accept any price + err := sh.Provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000) + assert.NoError(t, err) + + result := sh.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: sh.PayloadCid}, false, false) + require.False(t, result.ProposalCid.Equals(cid.Undef)) + + time.Sleep(time.Millisecond * 200) + + ctxTimeout, canc := context.WithTimeout(ctx, 25*time.Second) + defer canc() + + var storageProviderSeenDeal storagemarket.MinerDeal + var storageClientSeenDeal storagemarket.ClientDeal + for storageProviderSeenDeal.State != storagemarket.StorageDealExpired || + storageClientSeenDeal.State != storagemarket.StorageDealExpired { + select { + case storageProviderSeenDeal = <-providerDealChan: + case storageClientSeenDeal = <-clientDealChan: + case <-ctxTimeout.Done(): + t.Fatalf("never saw completed deal, client deal state: %s (%d), provider deal state: %s (%d)", + storagemarket.DealStates[storageClientSeenDeal.State], + storageClientSeenDeal.State, + storagemarket.DealStates[storageProviderSeenDeal.State], + storageProviderSeenDeal.State, + ) + } + } + // --------------- + fmt.Println("\n Storage is complete") + + return storageClientSeenDeal +} + +func doRetrieve(t *testing.T, ctx context.Context, rh *retrievalHarness, sh *testharness.StorageHarness, 
voucherAmts []abi.TokenAmount) int { + + proof := []byte("") + for _, voucherAmt := range voucherAmts { + require.NoError(t, rh.ProviderNode.ExpectVoucher(*rh.ExpPaych, rh.ExpVoucher, proof, voucherAmt, voucherAmt, nil)) + } + + peers := rh.Client.FindProviders(sh.PayloadCid) + require.Len(t, peers, 1) + retrievalPeer := peers[0] + require.NotNil(t, retrievalPeer.PieceCID) + + rh.ClientNode.ExpectKnownAddresses(retrievalPeer, nil) + + resp, err := rh.Client.Query(ctx, retrievalPeer, sh.PayloadCid, retrievalmarket.QueryParams{}) + require.NoError(t, err) + require.Equal(t, retrievalmarket.QueryResponseAvailable, resp.Status) + + // testing V1 only + rmParams, err := retrievalmarket.NewParamsV1(rh.RetrievalParams.PricePerByte, rh.RetrievalParams.PaymentInterval, rh.RetrievalParams.PaymentIntervalIncrease, + selectorparse.CommonSelector_ExploreAllRecursively, nil, + rh.RetrievalParams.UnsealPrice) + require.NoError(t, err) + + // just make sure there is enough to cover the transfer + fsize := 19000 // this is the known file size of the test file lorem.txt + expectedTotal := big.Add(big.Mul(rh.RetrievalParams.PricePerByte, abi.NewTokenAmount(int64(fsize*2))), rh.RetrievalParams.UnsealPrice) + + // *** Retrieve the piece + _, err = rh.Client.Retrieve(ctx, 0, sh.PayloadCid, rmParams, expectedTotal, retrievalPeer, *rh.ExpPaych, retrievalPeer.Address) + require.NoError(t, err) + + return fsize +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/testing/test_provider_deal_environment.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/testing/test_provider_deal_environment.go new file mode 100644 index 00000000000..3d4191dca61 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/testing/test_provider_deal_environment.go @@ -0,0 +1,66 @@ +// Package testing provides test implementations of retieval market interfaces +package testing + +import ( + "context" + + "github.com/ipfs/go-cid" + + datatransfer 
"github.com/filecoin-project/go-data-transfer"

	rm "github.com/filecoin-project/go-fil-markets/retrievalmarket"
	retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
)

// TestProviderDealEnvironment is a test implementation of ProviderDealEnvironment used
// by the provider state machine.
// Each XxxError field, when set non-nil, is returned verbatim by the matching
// method, letting a test inject a failure at any step of the deal flow; a nil
// field means that step succeeds.
type TestProviderDealEnvironment struct {
	node                     rm.RetrievalProviderNode
	ResumeDataTransferError  error
	PrepareBlockstoreError   error
	TrackTransferError       error
	UntrackTransferError     error
	CloseDataTransferError   error
	DeleteStoreError         error
}

// NewTestProviderDealEnvironment returns a new TestProviderDealEnvironment instance
func NewTestProviderDealEnvironment(node rm.RetrievalProviderNode) *TestProviderDealEnvironment {
	return &TestProviderDealEnvironment{
		node: node,
	}
}

// Node returns a provider node instance
func (te *TestProviderDealEnvironment) Node() rm.RetrievalProviderNode {
	return te.node
}

// DeleteStore returns the configured DeleteStoreError (nil means success).
func (te *TestProviderDealEnvironment) DeleteStore(dealID rm.DealID) error {
	return te.DeleteStoreError
}

// PrepareBlockstore returns the configured PrepareBlockstoreError (nil means success).
func (te *TestProviderDealEnvironment) PrepareBlockstore(ctx context.Context, dealID rm.DealID, pieceCid cid.Cid) error {
	return te.PrepareBlockstoreError
}

// TrackTransfer returns the configured TrackTransferError (nil means success).
func (te *TestProviderDealEnvironment) TrackTransfer(deal rm.ProviderDealState) error {
	return te.TrackTransferError
}

// UntrackTransfer returns the configured UntrackTransferError (nil means success).
func (te *TestProviderDealEnvironment) UntrackTransfer(deal rm.ProviderDealState) error {
	return te.UntrackTransferError
}

// ResumeDataTransfer returns the configured ResumeDataTransferError (nil means success).
func (te *TestProviderDealEnvironment) ResumeDataTransfer(_ context.Context, _ datatransfer.ChannelID) error {
	return te.ResumeDataTransferError
}

// CloseDataTransfer returns the configured CloseDataTransferError (nil means success).
func (te *TestProviderDealEnvironment) CloseDataTransfer(_ context.Context, _ datatransfer.ChannelID) error {
	return te.CloseDataTransferError
}

// TrivialTestDecider is a shortest possible DealDecider that accepts all deals
var TrivialTestDecider retrievalimpl.DealDecider = func(_ context.Context, _ rm.ProviderDealState) (bool, string,
error) { + return true, "", nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types.go new file mode 100644 index 00000000000..301e7628f3e --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types.go @@ -0,0 +1,436 @@ +package retrievalmarket + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/go-fil-markets/piecestore" +) + +//go:generate cbor-gen-for --map-encoding Query QueryResponse DealProposal DealResponse Params QueryParams DealPayment ClientDealState ProviderDealState PaymentInfo RetrievalPeer Ask + +// QueryProtocolID is the protocol for querying information about retrieval +// deal parameters +const QueryProtocolID = protocol.ID("/fil/retrieval/qry/1.0.0") + +// OldQueryProtocolID is the old query protocol for tuple structs +const OldQueryProtocolID = protocol.ID("/fil/retrieval/qry/0.0.1") + +// Unsubscribe is a function that unsubscribes a subscriber for either the +// client or the provider +type Unsubscribe func() + +// PaymentInfo is the payment channel and lane for a deal, once it is setup +type PaymentInfo struct { + PayCh address.Address + Lane uint64 +} + +// ClientDealState is the current state of a deal from the point of view +// of a retrieval client +type ClientDealState struct { + DealProposal + StoreID *uint64 + // Set when the data transfer is started + ChannelID 
*datatransfer.ChannelID + LastPaymentRequested bool + AllBlocksReceived bool + TotalFunds abi.TokenAmount + ClientWallet address.Address + MinerWallet address.Address + PaymentInfo *PaymentInfo + Status DealStatus + Sender peer.ID + TotalReceived uint64 + Message string + BytesPaidFor uint64 + CurrentInterval uint64 + PaymentRequested abi.TokenAmount + FundsSpent abi.TokenAmount + UnsealFundsPaid abi.TokenAmount + WaitMsgCID *cid.Cid // the CID of any message the client deal is waiting for + VoucherShortfall abi.TokenAmount + LegacyProtocol bool +} + +func (deal *ClientDealState) NextInterval() uint64 { + return deal.Params.NextInterval(deal.CurrentInterval) +} + +// ProviderDealState is the current state of a deal from the point of view +// of a retrieval provider +type ProviderDealState struct { + DealProposal + StoreID uint64 + + ChannelID *datatransfer.ChannelID + PieceInfo *piecestore.PieceInfo + Status DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 + LegacyProtocol bool +} + +func (deal *ProviderDealState) IntervalLowerBound() uint64 { + return deal.Params.IntervalLowerBound(deal.CurrentInterval) +} + +func (deal *ProviderDealState) NextInterval() uint64 { + return deal.Params.NextInterval(deal.CurrentInterval) +} + +// Identifier provides a unique id for this provider deal +func (pds ProviderDealState) Identifier() ProviderDealIdentifier { + return ProviderDealIdentifier{Receiver: pds.Receiver, DealID: pds.ID} +} + +// ProviderDealIdentifier is a value that uniquely identifies a deal +type ProviderDealIdentifier struct { + Receiver peer.ID + DealID DealID +} + +func (p ProviderDealIdentifier) String() string { + return fmt.Sprintf("%v/%v", p.Receiver, p.DealID) +} + +// RetrievalPeer is a provider address/peer.ID pair (everything needed to make +// deals for with a miner) +type RetrievalPeer struct { + Address address.Address + ID peer.ID // optional + PieceCID *cid.Cid +} + +// 
QueryResponseStatus indicates whether a queried piece is available +type QueryResponseStatus uint64 + +const ( + // QueryResponseAvailable indicates a provider has a piece and is prepared to + // return it + QueryResponseAvailable QueryResponseStatus = iota + + // QueryResponseUnavailable indicates a provider either does not have or cannot + // serve the queried piece to the client + QueryResponseUnavailable + + // QueryResponseError indicates something went wrong generating a query response + QueryResponseError +) + +// QueryItemStatus (V1) indicates whether the requested part of a piece (payload or selector) +// is available for retrieval +type QueryItemStatus uint64 + +const ( + // QueryItemAvailable indicates requested part of the piece is available to be + // served + QueryItemAvailable QueryItemStatus = iota + + // QueryItemUnavailable indicates the piece either does not contain the requested + // item or it cannot be served + QueryItemUnavailable + + // QueryItemUnknown indicates the provider cannot determine if the given item + // is part of the requested piece (for example, if the piece is sealed and the + // miner does not maintain a payload CID index) + QueryItemUnknown +) + +// QueryParams - V1 - indicate what specific information about a piece that a retrieval +// client is interested in, as well as specific parameters the client is seeking +// for the retrieval deal +type QueryParams struct { + PieceCID *cid.Cid // optional, query if miner has this cid in this piece. some miners may not be able to respond. + //Selector ipld.Node // optional, query if miner has this cid in this piece. some miners may not be able to respond. 
+ //MaxPricePerByte abi.TokenAmount // optional, tell miner uninterested if more expensive than this + //MinPaymentInterval uint64 // optional, tell miner uninterested unless payment interval is greater than this + //MinPaymentIntervalIncrease uint64 // optional, tell miner uninterested unless payment interval increase is greater than this +} + +// Query is a query to a given provider to determine information about a piece +// they may have available for retrieval +type Query struct { + PayloadCID cid.Cid // V0 + QueryParams // V1 +} + +// QueryUndefined is a query with no values +var QueryUndefined = Query{} + +// NewQueryV0 creates a V0 query (which only specifies a payload) +func NewQueryV0(payloadCID cid.Cid) Query { + return Query{PayloadCID: payloadCID} +} + +// NewQueryV1 creates a V1 query (which has an optional pieceCID) +func NewQueryV1(payloadCID cid.Cid, pieceCID *cid.Cid) Query { + return Query{ + PayloadCID: payloadCID, + QueryParams: QueryParams{ + PieceCID: pieceCID, + }, + } +} + +// QueryResponse is a miners response to a given retrieval query +type QueryResponse struct { + Status QueryResponseStatus + PieceCIDFound QueryItemStatus // V1 - if a PieceCID was requested, the result + //SelectorFound QueryItemStatus // V1 - if a Selector was requested, the result + + Size uint64 // Total size of piece in bytes + //ExpectedPayloadSize uint64 // V1 - optional, if PayloadCID + selector are specified and miner knows, can offer an expected size + + PaymentAddress address.Address // address to send funds to -- may be different than miner addr + MinPricePerByte abi.TokenAmount + MaxPaymentInterval uint64 + MaxPaymentIntervalIncrease uint64 + Message string + UnsealPrice abi.TokenAmount +} + +// QueryResponseUndefined is an empty QueryResponse +var QueryResponseUndefined = QueryResponse{} + +// PieceRetrievalPrice is the total price to retrieve the piece (size * MinPricePerByte + UnsealedPrice) +func (qr QueryResponse) PieceRetrievalPrice() abi.TokenAmount { 
	return big.Add(big.Mul(qr.MinPricePerByte, abi.NewTokenAmount(int64(qr.Size))), qr.UnsealPrice)
}

// PayloadRetrievalPrice is the expected price to retrieve just the given payload
// & selector (V1)
//func (qr QueryResponse) PayloadRetrievalPrice() abi.TokenAmount {
//	return types.BigMul(qr.MinPricePerByte, types.NewInt(qr.ExpectedPayloadSize))
//}

// IsTerminalError returns true if this status indicates processing of this deal
// is complete with an error
func IsTerminalError(status DealStatus) bool {
	return status == DealStatusDealNotFound ||
		status == DealStatusFailing ||
		status == DealStatusRejected
}

// IsTerminalSuccess returns true if this status indicates processing of this deal
// is complete with a success
func IsTerminalSuccess(status DealStatus) bool {
	return status == DealStatusCompleted
}

// IsTerminalStatus returns true if this status indicates processing of a deal is
// complete (either success or error)
func IsTerminalStatus(status DealStatus) bool {
	return IsTerminalError(status) || IsTerminalSuccess(status)
}

// Params are the parameters requested for a retrieval deal proposal
type Params struct {
	Selector                *cbg.Deferred // V1
	PieceCID                *cid.Cid
	PricePerByte            abi.TokenAmount
	PaymentInterval         uint64 // when to request payment
	PaymentIntervalIncrease uint64
	UnsealPrice             abi.TokenAmount
}

// SelectorSpecified reports whether a non-null selector was serialized into
// these params (a CBOR null in Selector.Raw counts as unspecified).
func (p Params) SelectorSpecified() bool {
	return p.Selector != nil && !bytes.Equal(p.Selector.Raw, cbg.CborNull)
}

// IntervalLowerBound returns the start (in bytes) of the payment interval that
// contains currentInterval, walking intervals of growing size
// (PaymentInterval, then +PaymentIntervalIncrease each step).
// NOTE(review): if PaymentInterval and PaymentIntervalIncrease are both 0 and
// currentInterval > 0, this loop never terminates — presumably callers only
// invoke it for paid deals with a non-zero interval; confirm against callers.
func (p Params) IntervalLowerBound(currentInterval uint64) uint64 {
	intervalSize := p.PaymentInterval
	var lowerBound uint64
	var target uint64
	for target < currentInterval {
		lowerBound = target
		target += intervalSize
		intervalSize += p.PaymentIntervalIncrease
	}
	return lowerBound
}

// NextInterval returns the end (in bytes) of the first payment interval
// strictly beyond currentInterval. Same zero-interval caveat as
// IntervalLowerBound applies.
func (p Params) NextInterval(currentInterval uint64) uint64 {
	intervalSize := p.PaymentInterval
	var nextInterval uint64
	for nextInterval <= currentInterval {
		nextInterval += intervalSize
		intervalSize += p.PaymentIntervalIncrease
	}
	return nextInterval
}

// NewParamsV0 generates parameters for a retrieval deal, which is always a whole piece deal
func NewParamsV0(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64) Params {
	return Params{
		PricePerByte:            pricePerByte,
		PaymentInterval:         paymentInterval,
		PaymentIntervalIncrease: paymentIntervalIncrease,
		UnsealPrice:             big.Zero(),
	}
}

// NewParamsV1 generates parameters for a retrieval deal, including a selector.
// The selector is mandatory and is serialized to CBOR for transport; an error
// is returned if sel is nil or fails to encode.
func NewParamsV1(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64, sel ipld.Node, pieceCid *cid.Cid, unsealPrice abi.TokenAmount) (Params, error) {
	var buffer bytes.Buffer

	if sel == nil {
		return Params{}, xerrors.New("selector required for NewParamsV1")
	}

	err := dagcbor.Encode(sel, &buffer)
	if err != nil {
		return Params{}, xerrors.Errorf("error encoding selector: %w", err)
	}

	return Params{
		Selector:                &cbg.Deferred{Raw: buffer.Bytes()},
		PieceCID:                pieceCid,
		PricePerByte:            pricePerByte,
		PaymentInterval:         paymentInterval,
		PaymentIntervalIncrease: paymentIntervalIncrease,
		UnsealPrice:             unsealPrice,
	}, nil
}

// DealID is an identifier for a retrieval deal (unique to a client)
type DealID uint64

// String renders the deal ID as its decimal representation.
func (d DealID) String() string {
	return fmt.Sprintf("%d", d)
}

// DealProposal is a proposal for a new retrieval deal
type DealProposal struct {
	PayloadCID cid.Cid
	ID         DealID
	Params
}

// Type method makes DealProposal usable as a voucher
func (dp *DealProposal) Type() datatransfer.TypeIdentifier {
	return "RetrievalDealProposal/1"
}

// DealProposalUndefined is an undefined deal proposal
var DealProposalUndefined = DealProposal{}

// DealResponse is a response to a retrieval deal proposal
type DealResponse struct {
	Status DealStatus
	ID     DealID

	// payment required to proceed
	PaymentOwed abi.TokenAmount

	Message string
}

// Type method makes
DealResponse usable as a voucher result
func (dr *DealResponse) Type() datatransfer.TypeIdentifier {
	return "RetrievalDealResponse/1"
}

// DealResponseUndefined is an undefined deal response
var DealResponseUndefined = DealResponse{}

// DealPayment is a payment for an in progress retrieval deal
type DealPayment struct {
	ID             DealID
	PaymentChannel address.Address
	PaymentVoucher *paychtypes.SignedVoucher
}

// Type method makes DealPayment usable as a voucher
// (receiver renamed dr -> dp: the old name was a copy-paste from DealResponse).
func (dp *DealPayment) Type() datatransfer.TypeIdentifier {
	return "RetrievalDealPayment/1"
}

// DealPaymentUndefined is an undefined deal payment
var DealPaymentUndefined = DealPayment{}

var (
	// ErrNotFound means a piece was not found during retrieval
	ErrNotFound = errors.New("not found")

	// ErrVerification means a retrieval contained a block response that did not verify
	// (message fixed: was ungrammatical and capitalized, against Go error-string convention)
	ErrVerification = errors.New("error verifying data")
)

// Ask is the provider's current pricing terms for retrieval deals.
type Ask struct {
	PricePerByte            abi.TokenAmount
	UnsealPrice             abi.TokenAmount
	PaymentInterval         uint64
	PaymentIntervalIncrease uint64
}

// ShortfallError is an error that indicates a short fall of funds
type ShortfallError struct {
	shortfall abi.TokenAmount
}

// NewShortfallError returns a new error indicating a shortfall of funds
func NewShortfallError(shortfall abi.TokenAmount) error {
	return ShortfallError{shortfall}
}

// Shortfall returns the numerical value of the shortfall
func (se ShortfallError) Shortfall() abi.TokenAmount {
	return se.shortfall
}

// Error implements the error interface.
// (message fixed: "Inssufficient Funds." was misspelled and capitalized)
func (se ShortfallError) Error() string {
	return fmt.Sprintf("insufficient funds, shortfall: %s", se.shortfall.String())
}

// ChannelAvailableFunds provides information about funds in a channel
type ChannelAvailableFunds struct {
	// ConfirmedAmt is the amount of funds that have been confirmed on-chain
	// for the channel
	ConfirmedAmt abi.TokenAmount
	// PendingAmt is the amount of funds that are pending confirmation on-chain
	PendingAmt abi.TokenAmount
	// PendingWaitSentinel can be used with PaychGetWaitReady to wait for
	// confirmation of pending funds
	PendingWaitSentinel *cid.Cid
	// QueuedAmt is the amount that is queued up behind a pending request
	QueuedAmt abi.TokenAmount
	// VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain
	// and in the local datastore
	// NOTE(review): field name is misspelled ("Reedeemed") but is exported and
	// serialized; renaming would break callers and cbor-gen output, so it stays.
	VoucherReedeemedAmt abi.TokenAmount
}

// PricingInput provides input parameters required to price a retrieval deal.
type PricingInput struct {
	// PayloadCID is the cid of the payload to retrieve.
	PayloadCID cid.Cid
	// PieceCID is the cid of the Piece from which the Payload will be retrieved.
	PieceCID cid.Cid
	// PieceSize is the size of the Piece from which the payload will be retrieved.
	PieceSize abi.UnpaddedPieceSize
	// Client is the peerID of the retrieval client.
	Client peer.ID
	// VerifiedDeal is true if there exists a verified storage deal for the PayloadCID.
	VerifiedDeal bool
	// Unsealed is true if there exists an unsealed sector from which we can retrieve the given payload.
	Unsealed bool
	// CurrentAsk is the current configured ask in the ask-store.
	CurrentAsk Ask
}
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types_cbor_gen.go
new file mode 100644
index 00000000000..ad6aef033a6
--- /dev/null
+++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types_cbor_gen.go
@@ -0,0 +1,2906 @@
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+ +package retrievalmarket + +import ( + "fmt" + "io" + "math" + "sort" + + datatransfer "github.com/filecoin-project/go-data-transfer" + piecestore "github.com/filecoin-project/go-fil-markets/piecestore" + paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *Query) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + if len("PayloadCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayloadCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PayloadCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PayloadCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.QueryParams (retrievalmarket.QueryParams) (struct) + if len("QueryParams") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"QueryParams\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("QueryParams"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("QueryParams")); err != nil { + return err + } + + if err := t.QueryParams.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Query) UnmarshalCBOR(r io.Reader) (err error) { + *t = Query{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return 
fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Query: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PayloadCID (cid.Cid) (struct) + case "PayloadCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.QueryParams (retrievalmarket.QueryParams) (struct) + case "QueryParams": + + { + + if err := t.QueryParams.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.QueryParams: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *QueryResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{169}); err != nil { + return err + } + + // t.Status (retrievalmarket.QueryResponseStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) + if len("PieceCIDFound") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCIDFound\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCIDFound"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCIDFound")); err != nil { + return err + } + 
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceCIDFound)); err != nil { + return err + } + + // t.Size (uint64) (uint64) + if len("Size") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Size\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Size")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + // t.PaymentAddress (address.Address) (struct) + if len("PaymentAddress") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentAddress\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentAddress"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentAddress")); err != nil { + return err + } + + if err := t.PaymentAddress.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinPricePerByte (big.Int) (struct) + if len("MinPricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinPricePerByte\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinPricePerByte"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinPricePerByte")); err != nil { + return err + } + + if err := t.MinPricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.MaxPaymentInterval (uint64) (uint64) + if len("MaxPaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPaymentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MaxPaymentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MaxPaymentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentInterval)); err != nil { + return err + } + + // 
t.MaxPaymentIntervalIncrease (uint64) (uint64) + if len("MaxPaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPaymentIntervalIncrease\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MaxPaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MaxPaymentIntervalIncrease")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPaymentIntervalIncrease)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *QueryResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type 
map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("QueryResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Status (retrievalmarket.QueryResponseStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = QueryResponseStatus(extra) + + } + // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) + case "PieceCIDFound": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceCIDFound = QueryItemStatus(extra) + + } + // t.Size (uint64) (uint64) + case "Size": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = uint64(extra) + + } + // t.PaymentAddress (address.Address) (struct) + case "PaymentAddress": + + { + + if err := t.PaymentAddress.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentAddress: %w", err) + } + + } + // t.MinPricePerByte (big.Int) (struct) + case "MinPricePerByte": + + { + + if err := t.MinPricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinPricePerByte: %w", err) + } + + } + // t.MaxPaymentInterval (uint64) (uint64) + case "MaxPaymentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentInterval = uint64(extra) + + } + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + case "MaxPaymentIntervalIncrease": + + { + + maj, extra, err = cr.ReadHeader() + 
if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentIntervalIncrease = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealProposal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.PayloadCID (cid.Cid) (struct) + if len("PayloadCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayloadCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PayloadCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PayloadCID")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.ID (retrievalmarket.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.Params (retrievalmarket.Params) (struct) + if len("Params") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Params\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Params"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Params")); err != nil { + return err + } + + if err := t.Params.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealProposal) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealProposal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealProposal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PayloadCID (cid.Cid) (struct) + case "PayloadCID": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.ID (retrievalmarket.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.Params (retrievalmarket.Params) (struct) + case "Params": + + { + + if err := t.Params.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Params: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + if 
len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.ID (retrievalmarket.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentOwed (big.Int) (struct) + if len("PaymentOwed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentOwed\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentOwed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentOwed")); err != nil { + return err + } + + if err := t.PaymentOwed.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + return nil +} + +func (t *DealResponse) UnmarshalCBOR(r io.Reader) (err error) { + 
*t = DealResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.ID (retrievalmarket.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.PaymentOwed (big.Int) (struct) + case "PaymentOwed": + + { + + if err := t.PaymentOwed.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentOwed: %w", err) + } + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Params) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + // t.Selector (typegen.Deferred) (struct) + if len("Selector") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Selector\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Selector"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Selector")); err != nil { + return err + } + + if err := t.Selector.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + // t.PricePerByte (big.Int) (struct) + if len("PricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PricePerByte\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PricePerByte"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PricePerByte")); err != nil { + return err + } + + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + if len("PaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + if len("PaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentIntervalIncrease\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentIntervalIncrease")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Params) UnmarshalCBOR(r io.Reader) (err error) { + *t = Params{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Params: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Selector (typegen.Deferred) (struct) + case "Selector": + + { + + t.Selector = new(cbg.Deferred) + + if err := t.Selector.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + 
t.PieceCID = &c + } + + } + // t.PricePerByte (big.Int) (struct) + case "PricePerByte": + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + case "PaymentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + case "PaymentIntervalIncrease": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *QueryParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *QueryParams) 
UnmarshalCBOR(r io.Reader) (err error) { + *t = QueryParams{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("QueryParams: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealPayment) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.ID (retrievalmarket.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentChannel (address.Address) (struct) + if len("PaymentChannel") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentChannel\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentChannel"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentChannel")); err != nil { + return err + } + + if err := t.PaymentChannel.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentVoucher (paych.SignedVoucher) (struct) + if len("PaymentVoucher") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentVoucher\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentVoucher"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentVoucher")); err != nil { + return err + } + + if err := t.PaymentVoucher.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealPayment) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealPayment{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealPayment: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (retrievalmarket.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.PaymentChannel (address.Address) (struct) + case "PaymentChannel": + + { + + if err := t.PaymentChannel.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentChannel: %w", err) + } + + } + // t.PaymentVoucher (paych.SignedVoucher) (struct) + case "PaymentVoucher": + + { + + b, err := cr.ReadByte() + if err != 
nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentVoucher = new(paych.SignedVoucher) + if err := t.PaymentVoucher.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentVoucher pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ClientDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{181}); err != nil { + return err + } + + // t.DealProposal (retrievalmarket.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, 
string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.LastPaymentRequested (bool) (bool) + if len("LastPaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LastPaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LastPaymentRequested"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LastPaymentRequested")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LastPaymentRequested); err != nil { + return err + } + + // t.AllBlocksReceived (bool) (bool) + if len("AllBlocksReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AllBlocksReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AllBlocksReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AllBlocksReceived")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AllBlocksReceived); err != nil { + return err + } + + // t.TotalFunds (big.Int) (struct) + if len("TotalFunds") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalFunds\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalFunds"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalFunds")); err != nil { + return err + } + + if err := t.TotalFunds.MarshalCBOR(cw); err != nil { + return err + } + + // t.ClientWallet (address.Address) (struct) + if len("ClientWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientWallet"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientWallet")); err != nil { + return err + } + + if err := t.ClientWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinerWallet (address.Address) 
(struct) + if len("MinerWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWallet\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinerWallet"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinerWallet")); err != nil { + return err + } + + if err := t.MinerWallet.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) + if len("PaymentInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInfo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentInfo")); err != nil { + return err + } + + if err := t.PaymentInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len("Sender") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sender\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sender"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Sender")); err != nil { + return err + } + + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Sender)); err != nil { + return err + } + + // t.TotalReceived (uint64) (uint64) + 
if len("TotalReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalReceived")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalReceived)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.BytesPaidFor (uint64) (uint64) + if len("BytesPaidFor") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BytesPaidFor\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("BytesPaidFor"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BytesPaidFor")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.BytesPaidFor)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CurrentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, 
uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.PaymentRequested (big.Int) (struct) + if len("PaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentRequested\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentRequested"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentRequested")); err != nil { + return err + } + + if err := t.PaymentRequested.MarshalCBOR(cw); err != nil { + return err + } + + // t.FundsSpent (big.Int) (struct) + if len("FundsSpent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsSpent\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsSpent"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsSpent")); err != nil { + return err + } + + if err := t.FundsSpent.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealFundsPaid (big.Int) (struct) + if len("UnsealFundsPaid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealFundsPaid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealFundsPaid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealFundsPaid")); err != nil { + return err + } + + if err := t.UnsealFundsPaid.MarshalCBOR(cw); err != nil { + return err + } + + // t.WaitMsgCID (cid.Cid) (struct) + if len("WaitMsgCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WaitMsgCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitMsgCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WaitMsgCID")); err != nil { + return err + } + + if t.WaitMsgCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.WaitMsgCID); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitMsgCID: 
%w", err) + } + } + + // t.VoucherShortfall (big.Int) (struct) + if len("VoucherShortfall") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VoucherShortfall\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VoucherShortfall"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VoucherShortfall")); err != nil { + return err + } + + if err := t.VoucherShortfall.MarshalCBOR(cw); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + return nil +} + +func (t *ClientDealState) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDealState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (retrievalmarket.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := 
cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.ChannelID = new(datatransfer.ChannelID) + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID pointer: %w", err) + } + } + + } + // t.LastPaymentRequested (bool) (bool) + case "LastPaymentRequested": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LastPaymentRequested = false + case 21: + t.LastPaymentRequested = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.AllBlocksReceived (bool) (bool) + case "AllBlocksReceived": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AllBlocksReceived = false + case 21: + t.AllBlocksReceived = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.TotalFunds (big.Int) (struct) + case "TotalFunds": + + { + + if err := t.TotalFunds.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) + } + + } + // t.ClientWallet (address.Address) (struct) + case "ClientWallet": + + { + + if err := t.ClientWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) + } + + } + // t.MinerWallet (address.Address) (struct) + case 
"MinerWallet": + + { + + if err := t.MinerWallet.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWallet: %w", err) + } + + } + // t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) + case "PaymentInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PaymentInfo = new(PaymentInfo) + if err := t.PaymentInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.Sender (peer.ID) (string) + case "Sender": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.TotalReceived (uint64) (uint64) + case "TotalReceived": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalReceived = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.BytesPaidFor (uint64) (uint64) + case "BytesPaidFor": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BytesPaidFor = uint64(extra) + + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.PaymentRequested 
(big.Int) (struct) + case "PaymentRequested": + + { + + if err := t.PaymentRequested.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) + } + + } + // t.FundsSpent (big.Int) (struct) + case "FundsSpent": + + { + + if err := t.FundsSpent.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) + } + + } + // t.UnsealFundsPaid (big.Int) (struct) + case "UnsealFundsPaid": + + { + + if err := t.UnsealFundsPaid.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealFundsPaid: %w", err) + } + + } + // t.WaitMsgCID (cid.Cid) (struct) + case "WaitMsgCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) + } + + t.WaitMsgCID = &c + } + + } + // t.VoucherShortfall (big.Int) (struct) + case "VoucherShortfall": + + { + + if err := t.VoucherShortfall.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherShortfall: %w", err) + } + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{171}); err != nil { + return err + } + + // t.DealProposal 
(retrievalmarket.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(cw); err != nil { + return err + } + + // t.PieceInfo (piecestore.PieceInfo) (struct) + if len("PieceInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceInfo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceInfo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceInfo")); err != nil { + return err + } + + if err := t.PieceInfo.MarshalCBOR(cw); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, 
uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len("Receiver") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Receiver\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Receiver"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Receiver")); err != nil { + return err + } + + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Receiver)); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + if len("TotalSent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalSent\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TotalSent"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalSent")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if len("FundsReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReceived\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReceived")); err != nil { + return err + } + + if err := t.FundsReceived.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CurrentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { 
+ sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (retrievalmarket.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.StoreID (uint64) (uint64) + case "StoreID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = uint64(extra) + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.ChannelID = new(datatransfer.ChannelID) + if err := t.ChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID pointer: %w", err) + } + } + + } + // t.PieceInfo (piecestore.PieceInfo) (struct) + case "PieceInfo": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceInfo = new(piecestore.PieceInfo) + if err := t.PieceInfo.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.Receiver (peer.ID) (string) + case "Receiver": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.TotalSent (uint64) (uint64) + case "TotalSent": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != 
cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.FundsReceived (big.Int) (struct) + case "FundsReceived": + + { + + if err := t.FundsReceived.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.PayCh (address.Address) (struct) + if len("PayCh") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayCh\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PayCh"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PayCh")); err != nil { + return err + } + + if err := t.PayCh.MarshalCBOR(cw); err != nil { + return err + } + + // t.Lane (uint64) (uint64) + if len("Lane") > cbg.MaxLength { + 
return xerrors.Errorf("Value in field \"Lane\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Lane"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Lane")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Lane)); err != nil { + return err + } + + return nil +} + +func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PaymentInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PaymentInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PayCh (address.Address) (struct) + case "PayCh": + + { + + if err := t.PayCh.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PayCh: %w", err) + } + + } + // t.Lane (uint64) (uint64) + case "Lane": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Lane = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *RetrievalPeer) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.Address (address.Address) (struct) + if len("Address") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Address\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Address"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Address")); err != nil { + return err + } + + if err := t.Address.MarshalCBOR(cw); err != nil { + return err + } + + // t.ID (peer.ID) (string) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if len(t.ID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ID was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ID)); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *RetrievalPeer) UnmarshalCBOR(r io.Reader) (err error) { + *t = RetrievalPeer{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("RetrievalPeer: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { 
+ sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Address (address.Address) (struct) + case "Address": + + { + + if err := t.Address.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.ID (peer.ID) (string) + case "ID": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ID = peer.ID(sval) + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Ask) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.PricePerByte (big.Int) (struct) + if len("PricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PricePerByte\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PricePerByte"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PricePerByte")); err != nil { + return err + } + + if err := t.PricePerByte.MarshalCBOR(cw); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealPrice")); err != nil { + return err + } + + if err := 
t.UnsealPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + if len("PaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInterval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentInterval")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + if len("PaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentIntervalIncrease\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentIntervalIncrease")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + return nil +} + +func (t *Ask) UnmarshalCBOR(r io.Reader) (err error) { + *t = Ask{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Ask: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PricePerByte (big.Int) (struct) + case "PricePerByte": + + { + + if err := t.PricePerByte.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.UnsealPrice (big.Int) (struct) + case 
"UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + case "PaymentInterval": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + case "PaymentIntervalIncrease": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types_test.go new file mode 100644 index 00000000000..47bd344b067 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/retrievalmarket/types_test.go @@ -0,0 +1,120 @@ +package retrievalmarket_test + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/ipld/go-ipld-prime/codec/dagcbor" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/libp2p/go-libp2p-core/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestParamsMarshalUnmarshal(t *testing.T) { + pieceCid := tut.GenerateCids(1)[0] + + allSelector := selectorparse.CommonSelector_ExploreAllRecursively + params, err := 
retrievalmarket.NewParamsV1(abi.NewTokenAmount(123), 456, 789, allSelector, &pieceCid, big.Zero()) + assert.NoError(t, err) + + buf := new(bytes.Buffer) + err = params.MarshalCBOR(buf) + assert.NoError(t, err) + + unmarshalled := &retrievalmarket.Params{} + err = unmarshalled.UnmarshalCBOR(buf) + assert.NoError(t, err) + + assert.Equal(t, params, *unmarshalled) + + nb := basicnode.Prototype.Any.NewBuilder() + err = dagcbor.Decode(nb, bytes.NewBuffer(unmarshalled.Selector.Raw)) + assert.NoError(t, err) + sel := nb.Build() + assert.Equal(t, sel, allSelector) +} + +func TestPricingInputMarshalUnmarshalJSON(t *testing.T) { + pid := test.RandPeerIDFatal(t) + + in := retrievalmarket.PricingInput{ + PayloadCID: tut.GenerateCids(1)[0], + PieceCID: tut.GenerateCids(1)[0], + PieceSize: abi.UnpaddedPieceSize(100), + Client: pid, + VerifiedDeal: true, + Unsealed: true, + CurrentAsk: retrievalmarket.Ask{ + PricePerByte: big.Zero(), + UnsealPrice: big.Zero(), + PaymentInterval: 0, + PaymentIntervalIncrease: 0, + }, + } + + bz, err := json.Marshal(in) + require.NoError(t, err) + + resp2 := retrievalmarket.PricingInput{} + require.NoError(t, json.Unmarshal(bz, &resp2)) + + require.Equal(t, in, resp2) +} + +func TestParamsIntervalBounds(t *testing.T) { + testCases := []struct { + name string + currentInterval uint64 + paymentInterval uint64 + intervalIncrease uint64 + expLowerBound uint64 + expNextInterval uint64 + }{{ + currentInterval: 0, + paymentInterval: 10, + intervalIncrease: 5, + expLowerBound: 0, + expNextInterval: 10, + }, { + currentInterval: 10, + paymentInterval: 10, + intervalIncrease: 5, + expLowerBound: 0, + expNextInterval: 25, // 10 + (10 + 5) + }, { + currentInterval: 25, + paymentInterval: 10, + intervalIncrease: 5, + expLowerBound: 10, + expNextInterval: 45, // 10 + (10 + 5) + (10 + 5 + 5) + }, { + currentInterval: 45, + paymentInterval: 10, + intervalIncrease: 5, + expLowerBound: 25, + expNextInterval: 70, // 10 + (10 + 5) + (10 + 5 + 5) + (10 + 5 + 5 + 5) + 
}} + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + params := retrievalmarket.Params{ + PaymentInterval: tc.paymentInterval, + PaymentIntervalIncrease: tc.intervalIncrease, + } + lowerBound := params.IntervalLowerBound(tc.currentInterval) + nextInterval := params.NextInterval(tc.currentInterval) + + require.Equal(t, tc.expLowerBound, lowerBound) + require.Equal(t, tc.expNextInterval, nextInterval) + }) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/scripts/fiximports b/extern/sxx-go-fil-markets@v1.24.0-v17/scripts/fiximports new file mode 100644 index 00000000000..b3ba69f7665 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/scripts/fiximports @@ -0,0 +1,14 @@ +#!/bin/bash + +find . -type f -name \*.go -not -name \*_cbor_gen.go | xargs -I '{}' sed -i.bak -e '/import (/ { + :1 + $!N + s/\n\n/\'$'\n''/ + /)/!b1 +}' '{}' +cd extern/filecoin-ffi ; git clean -fd ; cd ../.. ; git clean -fd +find . -type f -name \*.go -not -name \*_cbor_gen.go | xargs -I '{}' goimports -w -local "github.com/filecoin-project" '{}' +find . -type f -name \*.go -not -name \*_cbor_gen.go | xargs -I '{}' goimports -w -local "github.com/filecoin-project/go-fil-markets" '{}' +cd extern/filecoin-ffi ; git checkout . ; cd ../.. + + diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/scripts/mkreleaselog b/extern/sxx-go-fil-markets@v1.24.0-v17/scripts/mkreleaselog new file mode 100644 index 00000000000..ff90a72049e --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/scripts/mkreleaselog @@ -0,0 +1,245 @@ +#!/bin/zsh + +# Note: This script is a modified version of the mkreleaselog script used by +# the go-ipfs team. 
+# +# Usage: ./mkreleaselog v0.25.0 v0.26.0 > /tmp/release.log + +set -euo pipefail +export GO111MODULE=on +export GOPATH="$(go env GOPATH)" + +alias jq="jq --unbuffered" + +REPO_SUFFIXES_TO_STRIP=( + "/v2" + "/v3" + "/v4" + "/v5" + "/v6" +) + +AUTHORS=( + # orgs + filecoin-project/go-fil-markets + + # Authors of personal repos used by filecoin-ffi that should be mentioned in the + # release notes. + xlab + hannahhoward +) + +[[ -n "${REPO_FILTER+x}" ]] || REPO_FILTER="github.com/(${$(printf "|%s" "${AUTHORS[@]}"):1})" +echo $REPO_FILTER +[[ -n "${IGNORED_FILES+x}" ]] || IGNORED_FILES='^\(\.gx\|package\.json\|\.travis\.yml\|go.mod\|go\.sum|\.github|\.circleci\)$' + +NL=$'\n' + +msg() { + echo "$*" >&2 +} + +statlog() { + rpath="$GOPATH/src/$1" + for s in $REPO_SUFFIXES_TO_STRIP; do + rpath=${rpath%$s} + done + + start="${2:-}" + end="${3:-HEAD}" + + git -C "$rpath" log --shortstat --no-merges --pretty="tformat:%H%n%aN%n%aE" "$start..$end" | while + read hash + read name + read email + read _ # empty line + read changes + do + changed=0 + insertions=0 + deletions=0 + while read count event; do + if [[ "$event" =~ ^file ]]; then + changed=$count + elif [[ "$event" =~ ^insertion ]]; then + insertions=$count + elif [[ "$event" =~ ^deletion ]]; then + deletions=$count + else + echo "unknown event $event" >&2 + exit 1 + fi + done<<<"${changes//,/$NL}" + + jq -n \ + --arg "hash" "$hash" \ + --arg "name" "$name" \ + --arg "email" "$email" \ + --argjson "changed" "$changed" \ + --argjson "insertions" "$insertions" \ + --argjson "deletions" "$deletions" \ + '{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}' + done +} + +# Returns a stream of deps changed between $1 and $2. 
+dep_changes() { + { + <"$1" + <"$2" + } | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)' +} + +# resolve_commits resolves a git ref for each version. +resolve_commits() { + jq '. + {Ref: (.Version|capture("^((?.*)\\+incompatible|v.*-(0\\.)?[0-9]{14}-(?[a-f0-9]{12})|(?v.*))$") | .ref1 // .ref2 // .ref3)}' +} + +pr_link() { + local repo="$1" + local prnum="$2" + local ghname="${repo##github.com/}" + printf -- "[%s#%s](https://%s/pull/%s)" "$ghname" "$prnum" "$repo" "$prnum" +} + +# Generate a release log for a range of commits in a single repo. +release_log() { + setopt local_options BASH_REMATCH + + local repo="$1" + local start="$2" + local end="${3:-HEAD}" + local dir="$GOPATH/src/$repo" + + local commit pr + git -C "$dir" log \ + --format='tformat:%H %s' \ + --first-parent \ + "$start..$end" | + while read commit subject; do + # Skip gx-only PRs. + git -C "$dir" diff-tree --no-commit-id --name-only "$commit^" "$commit" | + grep -v "${IGNORED_FILES}" >/dev/null || continue + + if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then + local prnum="${BASH_REMATCH[2]}" + local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)" + printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")" + elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then + local prnum="${BASH_REMATCH[2]}" + printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")" + else + printf -- "- %s\n" "$subject" + fi + done +} + +indent() { + sed -e 's/^/ /' +} + +mod_deps() { + go list -mod=mod -json -m all | jq 'select(.Version != null)' +} + +ensure() { + local repo="$1" + for s in $REPO_SUFFIXES_TO_STRIP; do + repo=${repo%$s} + done + + local commit="$2" + + local rpath="$GOPATH/src/$repo" + if [[ ! -d "$rpath" ]]; then + msg "Cloning $repo..." + git clone "http://$repo" "$rpath" >&2 + fi + + if ! 
git -C "$rpath" rev-parse --verify "$commit" >/dev/null; then + msg "Fetching $repo..." + git -C "$rpath" fetch --all >&2 + fi + + git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1 +} + +statsummary() { + jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' | + jq '. + {Lines: (.Deletions + .Insertions)}' +} + +recursive_release_log() { + local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}" + local end="${2:-$(git rev-parse HEAD)}" + local repo_root="$(git rev-parse --show-toplevel)" + local package="$(cd "$repo_root" && go list -m)" + + if ! [[ "${GOPATH}/${package}" != "${repo_root}" ]]; then + echo "This script requires the target package and all dependencies to live in a GOPATH." + return 1 + fi + + ( + local result=0 + local workspace="$(mktemp -d)" + trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT + cd "$workspace" + + echo "Computing old deps..." >&2 + git -C "$repo_root" show "$start:go.mod" >go.mod + sed "s/^replace.*//g" go.mod > go.mod.new + mv go.mod.new go.mod + mod_deps | resolve_commits | jq -s > old_deps.json + + echo "Computing new deps..." 
>&2 + git -C "$repo_root" show "$end:go.mod" >go.mod + sed "s/^replace.*//g" go.mod > go.mod.new + mv go.mod.new go.mod + mod_deps | resolve_commits | jq -s > new_deps.json + + rm -f go.mod go.sum + + printf -- "Generating Changelog for %s %s..%s\n" "$package" "$start" "$end" >&2 + + printf -- "- %s:\n" "$package" + release_log "$package" "$start" "$end" | indent + + statlog "$package" "$start" "$end" > statlog.json + + dep_changes old_deps.json new_deps.json | + jq --arg filter "$REPO_FILTER" 'select(.Path | match($filter))' | + # Compute changelogs + jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' | + while read repo new new_ref old old_ref; do + for s in $REPO_SUFFIXES_TO_STRIP; do + repo=${repo%$s} + done + + if ! ensure "$repo" "$new_ref"; then + result=1 + local changelog="failed to fetch repo" + else + statlog "$repo" "$old_ref" "$new_ref" >> statlog.json + local changelog="$(release_log "$repo" "$old_ref" "$new_ref")" + fi + if [[ -n "$changelog" ]]; then + printf -- "- %s (%s -> %s):\n" "$repo" "$old" "$new" + echo "$changelog" | indent + fi + done + + echo + echo "Contributors" + echo + + echo "| Contributor | Commits | Lines ± | Files Changed |" + echo "|-------------|---------|---------|---------------|" + statsummary = impl.maxStreamOpenAttempts { + return nil, xerrors.Errorf("exhausted %d attempts but failed to open stream, err: %w", int(impl.maxStreamOpenAttempts), err) + } + + duration := b.Duration() + log.Warnf("failed to open stream to %s on attempt %.0f of %.0f, waiting %s to try again, err: %s", + id, nAttempts, impl.maxStreamOpenAttempts, duration, err) + + ebt := time.NewTimer(duration) + select { + case <-ctx.Done(): + ebt.Stop() + return nil, xerrors.Errorf("open stream to %s canceled by context", id) + case <-ebt.C: + } + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared/retrystream_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/retrystream_test.go new file mode 100644 index 
00000000000..f0e92bbf1d2 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/retrystream_test.go @@ -0,0 +1,74 @@ +package shared + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" +) + +func TestRetryStream(t *testing.T) { + tcases := []struct { + attempts int + errors int + expSuccess bool + }{{ + attempts: 1, + errors: 0, + expSuccess: true, + }, { + attempts: 1, + errors: 1, + expSuccess: false, + }, { + attempts: 2, + errors: 1, + expSuccess: true, + }, { + attempts: 2, + errors: 2, + expSuccess: false, + }} + for _, tcase := range tcases { + name := fmt.Sprintf("%d attempts, %d errors", tcase.attempts, tcase.errors) + t.Run(name, func(t *testing.T) { + opener := &mockOpener{ + errs: make(chan error, tcase.errors), + } + for i := 0; i < tcase.errors; i++ { + opener.errs <- xerrors.Errorf("network err") + } + params := RetryParameters( + time.Millisecond, + time.Millisecond, + float64(tcase.attempts), + 1) + rs := NewRetryStream(opener, params) + _, err := rs.OpenStream(context.Background(), peer.ID("peer1"), []protocol.ID{"proto1"}) + if tcase.expSuccess { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +type mockOpener struct { + errs chan error +} + +func (o *mockOpener) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) { + select { + case e := <-o.errs: + return nil, e + default: + return nil, nil + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared/selectors.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/selectors.go new file mode 100644 index 00000000000..9422d29f93b --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/selectors.go @@ -0,0 +1,10 @@ +package shared + +import ( + "github.com/ipld/go-ipld-prime" + selectorparse 
"github.com/ipld/go-ipld-prime/traversal/selector/parse" +) + +// Deprecated: AllSelector is a compatibility alias for an entire DAG non-matching-selector. +// Use github.com/ipld/go-ipld-prime/traversal/selector/parse.CommonSelector_ExploreAllRecursively instead. +func AllSelector() ipld.Node { return selectorparse.CommonSelector_ExploreAllRecursively } diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared/timecounter.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/timecounter.go new file mode 100644 index 00000000000..7f27b4ee384 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/timecounter.go @@ -0,0 +1,21 @@ +package shared + +import ( + "sync/atomic" + "time" +) + +// timeCounter is used to generate a monotonically increasing sequence. +// It starts at the current time, then increments on each call to next. +type TimeCounter struct { + counter uint64 +} + +func NewTimeCounter() *TimeCounter { + return &TimeCounter{counter: uint64(time.Now().UnixNano())} +} + +func (tc *TimeCounter) Next() uint64 { + counter := atomic.AddUint64(&tc.counter, 1) + return counter +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared/timecounter_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/timecounter_test.go new file mode 100644 index 00000000000..0ac63acced0 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/timecounter_test.go @@ -0,0 +1,48 @@ +package shared + +import ( + "sync" + "testing" + "time" +) + +func TestTimeCounter(t *testing.T) { + // Test that counter increases between restarts + tc1 := NewTimeCounter() + time.Sleep(time.Millisecond) + tc2 := NewTimeCounter() + tc1Next := tc1.Next() + tc2Next := tc2.Next() + if tc2Next <= tc1Next { + t.Fatal("counter should increase for each new counter generator", tc1Next, tc2Next) + } + + // Test that the counter always increases + for i := 0; i < 100; i++ { + first := tc1.Next() + second := tc1.Next() + if second <= first { + t.Fatal("counter should increase monotonically", 
first, second) + } + } + + // Test that the counter is thread-safe + count := 1000 + threads := 20 + counter := tc1.Next() + var wg sync.WaitGroup + for i := 0; i < threads; i++ { + wg.Add(1) + go func() { + for i := 0; i < count; i++ { + tc1.Next() + } + wg.Done() + }() + } + wg.Wait() + + if tc1.Next() != counter+uint64(threads*count+1) { + t.Fatal("Next() is not thread safe") + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/types.go new file mode 100644 index 00000000000..7f752a236cf --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared/types.go @@ -0,0 +1,7 @@ +package shared + +// TipSetToken is the implementation-nonspecific identity for a tipset. +type TipSetToken []byte + +// Unsubscribe is a function that gets called to unsubscribe from (storage|retrieval)market events +type Unsubscribe func() diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/files.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/files.go new file mode 100644 index 00000000000..67bf0cc3666 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/files.go @@ -0,0 +1,85 @@ +package shared_testutil + +import ( + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + bstore "github.com/ipfs/go-ipfs-blockstore" + offline "github.com/ipfs/go-ipfs-exchange-offline" + "github.com/ipfs/go-merkledag" + "github.com/ipld/go-car/v2/blockstore" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-fil-markets/shared_testutil/unixfs" + "github.com/filecoin-project/go-fil-markets/stores" +) + +func ThisDir(t *testing.T) string { + _, fname, _, ok := runtime.Caller(1) + require.True(t, ok) + return filepath.Dir(fname) +} + +// CreateDenseCARv2 generates a "dense" UnixFS CARv2 from the supplied ordinary file. 
+// A dense UnixFS CARv2 is one storing leaf data. Contrast to CreateRefCARv2. +func CreateDenseCARv2(t *testing.T, src string) (root cid.Cid, path string) { + bs := bstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + dagSvc := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + + root = unixfs.WriteUnixfsDAGTo(t, src, dagSvc) + + // Create a UnixFS DAG again AND generate a CARv2 file using a CARv2 + // read-write blockstore now that we have the root. + out, err := os.CreateTemp("", "rand") + require.NoError(t, err) + require.NoError(t, out.Close()) + + t.Cleanup(func() { os.Remove(out.Name()) }) + + rw, err := blockstore.OpenReadWrite(out.Name(), []cid.Cid{root}, blockstore.UseWholeCIDs(true)) + require.NoError(t, err) + + dagSvc = merkledag.NewDAGService(blockservice.New(rw, offline.Exchange(rw))) + + root2 := unixfs.WriteUnixfsDAGTo(t, src, dagSvc) + require.NoError(t, rw.Finalize()) + require.Equal(t, root, root2) + + return root, out.Name() +} + +// CreateRefCARv2 generates a "ref" CARv2 from the supplied ordinary file. +// A "ref" CARv2 is one that stores leaf data as positional references to the original file. +func CreateRefCARv2(t *testing.T, src string) (cid.Cid, string) { + bs := bstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + dagSvc := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + + root := unixfs.WriteUnixfsDAGTo(t, src, dagSvc) + path := genRefCARv2(t, src, root) + + return root, path +} + +func genRefCARv2(t *testing.T, path string, root cid.Cid) string { + tmp, err := os.CreateTemp("", "rand") + require.NoError(t, err) + require.NoError(t, tmp.Close()) + + fs, err := stores.ReadWriteFilestore(tmp.Name(), root) + require.NoError(t, err) + + dagSvc := merkledag.NewDAGService(blockservice.New(fs, offline.Exchange(fs))) + + root2 := unixfs.WriteUnixfsDAGTo(t, path, dagSvc) + require.NoError(t, fs.Close()) + require.Equal(t, root, root2) + + // return the path of the CARv2 file. 
+ return tmp.Name() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/generators.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/generators.go new file mode 100644 index 00000000000..745fae2918b --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/generators.go @@ -0,0 +1,304 @@ +package shared_testutil + +import ( + "math/rand" + "testing" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/test" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v8/paych" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +// MakeTestSignedVoucher generates a random SignedVoucher that has all non-zero fields +func MakeTestSignedVoucher() *paych.SignedVoucher { + return &paych.SignedVoucher{ + ChannelAddr: address.TestAddress, + TimeLockMin: abi.ChainEpoch(rand.Int63()), + TimeLockMax: 0, + SecretHash: []byte("secret-preimage"), + Extra: MakeTestModVerifyParams(), + Lane: rand.Uint64(), + Nonce: rand.Uint64(), + Amount: MakeTestTokenAmount(), + Merges: []paych.Merge{MakeTestMerge()}, + Signature: MakeTestSignature(), + } +} + +// MakeTestModVerifyParams generates a random ModVerifyParams that has all non-zero fields +func MakeTestModVerifyParams() *paych.ModVerifyParams { + return &paych.ModVerifyParams{ + Actor: address.TestAddress, + Method: abi.MethodNum(rand.Int63()), + 
Data: []byte("ModVerifyParams data"), + } +} + +// MakeTestMerge generates a random Merge that has all non-zero fields +func MakeTestMerge() paych.Merge { + return paych.Merge{ + Lane: rand.Uint64(), + Nonce: rand.Uint64(), + } +} + +// MakeTestSignature generates a valid yet random Signature with all non-zero fields +func MakeTestSignature() *crypto.Signature { + return &crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: []byte("signature data"), + } +} + +// MakeTestTokenAmount generates a valid yet random TokenAmount with a non-zero value. +func MakeTestTokenAmount() abi.TokenAmount { + return abi.TokenAmount(big.NewInt(rand.Int63())) +} + +// MakeTestQueryResponse generates a valid, random QueryResponse with no non-zero fields +func MakeTestQueryResponse() retrievalmarket.QueryResponse { + return retrievalmarket.QueryResponse{ + Status: retrievalmarket.QueryResponseUnavailable, + Size: rand.Uint64(), + PaymentAddress: address.TestAddress2, + MinPricePerByte: MakeTestTokenAmount(), + MaxPaymentInterval: rand.Uint64(), + MaxPaymentIntervalIncrease: rand.Uint64(), + UnsealPrice: big.Zero(), + } +} + +// MakeTestDealProposal generates a valid, random DealProposal +func MakeTestDealProposal() retrievalmarket.DealProposal { + cid := GenerateCids(1)[0] + return retrievalmarket.DealProposal{ + PayloadCID: cid, + ID: retrievalmarket.DealID(rand.Uint64()), + Params: retrievalmarket.NewParamsV0(MakeTestTokenAmount(), rand.Uint64(), rand.Uint64()), + } +} + +// MakeTestChannelID makes a new empty data transfer channel ID +func MakeTestChannelID() datatransfer.ChannelID { + testPeers := GeneratePeers(2) + transferID := datatransfer.TransferID(rand.Uint64()) + return datatransfer.ChannelID{ID: transferID, Initiator: testPeers[0], Responder: testPeers[1]} +} + +// MakeTestUnsignedDealProposal generates a deal proposal with no signature +func MakeTestUnsignedDealProposal() market.DealProposal { + start := uint64(rand.Int31()) + end := start + uint64(rand.Int31()) + l, 
_ := market.NewLabelFromString("") + + return market.DealProposal{ + PieceCID: GenerateCids(1)[0], + PieceSize: abi.PaddedPieceSize(rand.Int63()), + + Client: address.TestAddress, + Provider: address.TestAddress2, + Label: l, + + StartEpoch: abi.ChainEpoch(start), + EndEpoch: abi.ChainEpoch(end), + + StoragePricePerEpoch: MakeTestTokenAmount(), + ProviderCollateral: MakeTestTokenAmount(), + ClientCollateral: MakeTestTokenAmount(), + } +} + +// MakeTestClientDealProposal generates a valid storage deal proposal +func MakeTestClientDealProposal() *market.ClientDealProposal { + return &market.ClientDealProposal{ + Proposal: MakeTestUnsignedDealProposal(), + ClientSignature: *MakeTestSignature(), + } +} + +// MakeTestDataRef returns a storage market data ref +func MakeTestDataRef(manualXfer bool) *storagemarket.DataRef { + out := &storagemarket.DataRef{ + Root: GenerateCids(1)[0], + } + + if manualXfer { + out.TransferType = storagemarket.TTManual + } + + return out +} + +// MakeTestClientDeal returns a storage market client deal +func MakeTestClientDeal(state storagemarket.StorageDealStatus, clientDealProposal *market.ClientDealProposal, manualXfer bool) (*storagemarket.ClientDeal, error) { + proposalNd, err := cborutil.AsIpld(clientDealProposal) + + if err != nil { + return nil, err + } + + p, err := test.RandPeerID() + if err != nil { + return nil, err + } + return &storagemarket.ClientDeal{ + ProposalCid: proposalNd.Cid(), + ClientDealProposal: *clientDealProposal, + State: state, + Miner: p, + MinerWorker: address.TestAddress2, + DataRef: MakeTestDataRef(manualXfer), + DealStages: storagemarket.NewDealStages(), + }, nil +} + +// MakeTestMinerDeal returns a storage market provider deal +func MakeTestMinerDeal(state storagemarket.StorageDealStatus, clientDealProposal *market.ClientDealProposal, dataRef *storagemarket.DataRef) (*storagemarket.MinerDeal, error) { + proposalNd, err := cborutil.AsIpld(clientDealProposal) + + if err != nil { + return nil, err + } + + p, 
err := test.RandPeerID() + if err != nil { + return nil, err + } + + return &storagemarket.MinerDeal{ + ProposalCid: proposalNd.Cid(), + ClientDealProposal: *clientDealProposal, + State: state, + Client: p, + Ref: dataRef, + }, nil +} + +// MakeTestStorageAsk generates a storage ask +func MakeTestStorageAsk() *storagemarket.StorageAsk { + return &storagemarket.StorageAsk{ + Price: MakeTestTokenAmount(), + VerifiedPrice: MakeTestTokenAmount(), + MinPieceSize: abi.PaddedPieceSize(rand.Uint64()), + Miner: address.TestAddress2, + Timestamp: abi.ChainEpoch(rand.Int63()), + Expiry: abi.ChainEpoch(rand.Int63()), + SeqNo: rand.Uint64(), + } +} + +// MakeTestSignedStorageAsk generates a signed storage ask +func MakeTestSignedStorageAsk() *storagemarket.SignedStorageAsk { + return &storagemarket.SignedStorageAsk{ + Ask: MakeTestStorageAsk(), + Signature: MakeTestSignature(), + } +} + +// MakeTestStorageNetworkProposal generates a proposal that can be sent over the +// network to a provider +func MakeTestStorageNetworkProposal() smnet.Proposal { + return smnet.Proposal{ + DealProposal: MakeTestClientDealProposal(), + Piece: &storagemarket.DataRef{Root: GenerateCids(1)[0]}, + } +} + +// MakeTestStorageNetworkResponse generates a response to a proposal sent over +// the network +func MakeTestStorageNetworkResponse() smnet.Response { + return smnet.Response{ + State: storagemarket.StorageDealSealing, + Proposal: GenerateCids(1)[0], + PublishMessage: &(GenerateCids(1)[0]), + } +} + +// MakeTestStorageNetworkSignedResponse generates a response to a proposal sent over +// the network that is signed +func MakeTestStorageNetworkSignedResponse() smnet.SignedResponse { + return smnet.SignedResponse{ + Response: MakeTestStorageNetworkResponse(), + Signature: MakeTestSignature(), + } +} + +// MakeTestStorageAskRequest generates a request to get a provider's ask +func MakeTestStorageAskRequest() smnet.AskRequest { + return smnet.AskRequest{ + Miner: address.TestAddress2, + } +} + +// 
MakeTestStorageAskResponse generates a response to an ask request +func MakeTestStorageAskResponse() smnet.AskResponse { + return smnet.AskResponse{ + Ask: MakeTestSignedStorageAsk(), + } +} + +// MakeTestDealStatusRequest generates a request to get a provider's query +func MakeTestDealStatusRequest() smnet.DealStatusRequest { + return smnet.DealStatusRequest{ + Proposal: GenerateCids(1)[0], + Signature: *MakeTestSignature(), + } +} + +// MakeTestDealStatusResponse generates a response to an query request +func MakeTestDealStatusResponse() smnet.DealStatusResponse { + proposal := MakeTestUnsignedDealProposal() + + ds := storagemarket.ProviderDealState{ + Proposal: &proposal, + ProposalCid: &GenerateCids(1)[0], + State: storagemarket.StorageDealActive, + } + + return smnet.DealStatusResponse{ + DealState: ds, + Signature: *MakeTestSignature(), + } +} + +func RequireGenerateRetrievalPeers(t *testing.T, numPeers int) []retrievalmarket.RetrievalPeer { + peers := make([]retrievalmarket.RetrievalPeer, numPeers) + for i := range peers { + pid, err := test.RandPeerID() + require.NoError(t, err) + addr, err := address.NewIDAddress(uint64(rand.Int63())) + require.NoError(t, err) + peers[i] = retrievalmarket.RetrievalPeer{ + Address: addr, + ID: pid, + } + } + return peers +} + +type FakeDTValidator struct{} + +func (v *FakeDTValidator) ValidatePush(isRestart bool, _ datatransfer.ChannelID, sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + return nil, nil +} + +func (v *FakeDTValidator) ValidatePull(isRestart bool, _ datatransfer.ChannelID, receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + return nil, nil +} + +var _ datatransfer.RequestValidator = (*FakeDTValidator)(nil) diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mockdagstorewrapper.go 
b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mockdagstorewrapper.go new file mode 100644 index 00000000000..74f9a4f4212 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mockdagstorewrapper.go @@ -0,0 +1,176 @@ +package shared_testutil + +import ( + "context" + "io" + "os" + "sync" + + "github.com/ipfs/go-cid" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + carindex "github.com/ipld/go-car/v2/index" + "golang.org/x/xerrors" + + "github.com/filecoin-project/dagstore" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/stores" +) + +type registration struct { + CarPath string + EagerInit bool +} + +// MockDagStoreWrapper is used to mock out the DAG store wrapper operations +// for the tests. +// It simulates getting deal info from a piece store and unsealing the data for +// the deal from a retrieval provider node. 
+type MockDagStoreWrapper struct { + pieceStore piecestore.PieceStore + sa retrievalmarket.SectorAccessor + + lk sync.Mutex + registrations map[cid.Cid]registration + piecesWithBlock map[cid.Cid][]cid.Cid +} + +var _ stores.DAGStoreWrapper = (*MockDagStoreWrapper)(nil) + +func NewMockDagStoreWrapper(pieceStore piecestore.PieceStore, sa retrievalmarket.SectorAccessor) *MockDagStoreWrapper { + return &MockDagStoreWrapper{ + pieceStore: pieceStore, + sa: sa, + registrations: make(map[cid.Cid]registration), + piecesWithBlock: make(map[cid.Cid][]cid.Cid), + } +} + +func (m *MockDagStoreWrapper) RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error { + m.lk.Lock() + defer m.lk.Unlock() + + m.registrations[pieceCid] = registration{ + CarPath: carPath, + EagerInit: eagerInit, + } + + resch <- dagstore.ShardResult{} + return nil +} + +func (m *MockDagStoreWrapper) DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error { + m.lk.Lock() + defer m.lk.Unlock() + delete(m.registrations, pieceCid) + resch <- dagstore.ShardResult{} + return nil +} + +func (m *MockDagStoreWrapper) GetIterableIndexForPiece(c cid.Cid) (carindex.IterableIndex, error) { + return nil, nil +} + +func (m *MockDagStoreWrapper) MigrateDeals(ctx context.Context, deals []storagemarket.MinerDeal) (bool, error) { + return true, nil +} + +func (m *MockDagStoreWrapper) LenRegistrations() int { + m.lk.Lock() + defer m.lk.Unlock() + + return len(m.registrations) +} + +func (m *MockDagStoreWrapper) GetRegistration(pieceCid cid.Cid) (registration, bool) { + m.lk.Lock() + defer m.lk.Unlock() + + reg, ok := m.registrations[pieceCid] + return reg, ok +} + +func (m *MockDagStoreWrapper) ClearRegistrations() { + m.lk.Lock() + defer m.lk.Unlock() + + m.registrations = make(map[cid.Cid]registration) +} + +func (m *MockDagStoreWrapper) LoadShard(ctx context.Context, pieceCid cid.Cid) (stores.ClosableBlockstore, error) { + 
m.lk.Lock() + defer m.lk.Unlock() + + _, ok := m.registrations[pieceCid] + if !ok { + return nil, xerrors.Errorf("no shard for piece CID %s", pieceCid) + } + + // Get the piece info from the piece store + pi, err := m.pieceStore.GetPieceInfo(pieceCid) + if err != nil { + return nil, err + } + + // Unseal the sector data for the deal + deal := pi.Deals[0] + r, err := m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + if err != nil { + return nil, xerrors.Errorf("error unsealing deal for piece %s: %w", pieceCid, err) + } + + return getBlockstoreFromReader(r, pieceCid) +} + +func getBlockstoreFromReader(r io.ReadCloser, pieceCid cid.Cid) (stores.ClosableBlockstore, error) { + // Write the piece to a file + tmpFile, err := os.CreateTemp("", "dagstoretmp") + if err != nil { + return nil, xerrors.Errorf("creating temp file for piece CID %s: %w", pieceCid, err) + } + + _, err = io.Copy(tmpFile, r) + if err != nil { + return nil, xerrors.Errorf("copying read stream to temp file for piece CID %s: %w", pieceCid, err) + } + + err = tmpFile.Close() + if err != nil { + return nil, xerrors.Errorf("closing temp file for piece CID %s: %w", pieceCid, err) + } + + // Get a blockstore from the CAR file + return blockstore.OpenReadOnly(tmpFile.Name(), carv2.ZeroLengthSectionAsEOF(true), blockstore.UseWholeCIDs(true)) +} + +func (m *MockDagStoreWrapper) Close() error { + return nil +} + +func (m *MockDagStoreWrapper) GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) { + m.lk.Lock() + defer m.lk.Unlock() + + pieces, ok := m.piecesWithBlock[blockCID] + if !ok { + return nil, retrievalmarket.ErrNotFound + } + + return pieces, nil +} + +// Used by the tests to add an entry to the index of block CID -> []piece CID +func (m *MockDagStoreWrapper) AddBlockToPieceIndex(blockCID cid.Cid, pieceCid cid.Cid) { + m.lk.Lock() + defer m.lk.Unlock() + + pieces, ok := m.piecesWithBlock[blockCID] + if !ok { + m.piecesWithBlock[blockCID] = 
[]cid.Cid{pieceCid} + } else { + m.piecesWithBlock[blockCID] = append(pieces, pieceCid) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mockindexprovider.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mockindexprovider.go new file mode 100644 index 00000000000..e408090ddf4 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mockindexprovider.go @@ -0,0 +1,56 @@ +package shared_testutil + +import ( + "context" + "sync" + + "github.com/ipfs/go-cid" + + provider "github.com/filecoin-project/index-provider" + "github.com/filecoin-project/index-provider/metadata" +) + +type MockIndexProvider struct { + provider.Interface + + lk sync.Mutex + callback provider.MultihashLister + notifs map[string]metadata.Metadata +} + +func NewMockIndexProvider() *MockIndexProvider { + return &MockIndexProvider{ + notifs: make(map[string]metadata.Metadata), + } + +} + +func (m *MockIndexProvider) RegisterMultihashLister(cb provider.MultihashLister) { + m.lk.Lock() + defer m.lk.Unlock() + + m.callback = cb +} + +func (m *MockIndexProvider) NotifyPut(ctx context.Context, contextID []byte, metadata metadata.Metadata) (cid.Cid, error) { + m.lk.Lock() + defer m.lk.Unlock() + + m.notifs[string(contextID)] = metadata + + return cid.Undef, nil +} + +func (m *MockIndexProvider) NotifyRemove(ctx context.Context, contextID []byte) (cid.Cid, error) { + m.lk.Lock() + defer m.lk.Unlock() + + return cid.Undef, nil +} + +func (m *MockIndexProvider) GetNotifs() map[string]metadata.Metadata { + m.lk.Lock() + defer m.lk.Unlock() + + return m.notifs +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mocknet.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mocknet.go new file mode 100644 index 00000000000..667b522e863 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/mocknet.go @@ -0,0 +1,188 @@ +package shared_testutil + +import ( + "io/ioutil" + "os" + "testing" + + 
"github.com/ipfs/go-blockservice" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + dss "github.com/ipfs/go-datastore/sync" + "github.com/ipfs/go-graphsync/storeutil" + bstore "github.com/ipfs/go-ipfs-blockstore" + offline "github.com/ipfs/go-ipfs-exchange-offline" + files "github.com/ipfs/go-ipfs-files" + ipldformat "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + unixfile "github.com/ipfs/go-unixfs/file" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p-core/host" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + + dtnet "github.com/filecoin-project/go-data-transfer/network" + + "github.com/filecoin-project/go-fil-markets/shared_testutil/unixfs" +) + +type Libp2pTestData struct { + Ctx context.Context + Ds1 datastore.Batching + Ds2 datastore.Batching + Bs1 bstore.Blockstore + Bs2 bstore.Blockstore + DagService1 ipldformat.DAGService + DagService2 ipldformat.DAGService + DTNet1 dtnet.DataTransferNetwork + DTNet2 dtnet.DataTransferNetwork + DTStore1 datastore.Batching + DTStore2 datastore.Batching + DTTmpDir1 string + DTTmpDir2 string + LinkSystem1 ipld.LinkSystem + LinkSystem2 ipld.LinkSystem + Host1 host.Host + Host2 host.Host + OrigBytes []byte + + MockNet mocknet.Mocknet +} + +func NewLibp2pTestData(ctx context.Context, t *testing.T) *Libp2pTestData { + testData := &Libp2pTestData{} + testData.Ctx = ctx + + var err error + + testData.Ds1 = dss.MutexWrap(datastore.NewMapDatastore()) + testData.Ds2 = dss.MutexWrap(datastore.NewMapDatastore()) + + // make a bstore and dag service + testData.Bs1 = bstore.NewBlockstore(testData.Ds1) + testData.Bs2 = bstore.NewBlockstore(testData.Ds2) + + testData.DagService1 = merkledag.NewDAGService(blockservice.New(testData.Bs1, offline.Exchange(testData.Bs1))) + testData.DagService2 = merkledag.NewDAGService(blockservice.New(testData.Bs2, 
offline.Exchange(testData.Bs2)))
+
+	// setup an IPLD link system for bstore 1
+	testData.LinkSystem1 = storeutil.LinkSystemForBlockstore(testData.Bs1)
+
+	// setup an IPLD link system for bstore 2
+	testData.LinkSystem2 = storeutil.LinkSystemForBlockstore(testData.Bs2)
+
+	mn := mocknet.New()
+
+	// setup network
+	testData.Host1, err = mn.GenPeer()
+	require.NoError(t, err)
+
+	testData.Host2, err = mn.GenPeer()
+	require.NoError(t, err)
+
+	err = mn.LinkAll()
+	require.NoError(t, err)
+
+	testData.DTNet1 = dtnet.NewFromLibp2pHost(testData.Host1)
+	testData.DTNet2 = dtnet.NewFromLibp2pHost(testData.Host2)
+
+	testData.DTStore1 = namespace.Wrap(testData.Ds1, datastore.NewKey("DataTransfer1"))
+	// NOTE(review): previously wrapped testData.Ds1 — that looks like a
+	// copy-paste slip; node 2's transfer store should live in node 2's
+	// datastore, mirroring DTStore1/Ds1.
+	testData.DTStore2 = namespace.Wrap(testData.Ds2, datastore.NewKey("DataTransfer2"))
+
+	testData.DTTmpDir1, err = ioutil.TempDir("", "dt-tmp-1")
+	require.NoError(t, err)
+	testData.DTTmpDir2, err = ioutil.TempDir("", "dt-tmp-2")
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		_ = os.RemoveAll(testData.DTTmpDir1)
+		_ = os.RemoveAll(testData.DTTmpDir2)
+	})
+
+	testData.MockNet = mn
+
+	return testData
+}
+
+// LoadUnixFSFile injects the fixture `src` into the given blockstore from the
+// fixtures directory.
If useSecondNode is true, fixture is injected to the second node; +// otherwise the first node gets it +func (ltd *Libp2pTestData) LoadUnixFSFile(t *testing.T, src string, useSecondNode bool) (ipld.Link, string) { + var dagService ipldformat.DAGService + if useSecondNode { + dagService = ltd.DagService2 + } else { + dagService = ltd.DagService1 + } + return ltd.loadUnixFSFile(t, src, dagService) +} + +// LoadUnixFSFileToStore creates a CAR file from the fixture at `src` +func (ltd *Libp2pTestData) LoadUnixFSFileToStore(t *testing.T, src string) (ipld.Link, string) { + dstore := dss.MutexWrap(datastore.NewMapDatastore()) + bs := bstore.NewBlockstore(dstore) + dagService := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + + return ltd.loadUnixFSFile(t, src, dagService) +} + +func (ltd *Libp2pTestData) loadUnixFSFile(t *testing.T, src string, dagService ipldformat.DAGService) (ipld.Link, string) { + f, err := os.Open(src) + require.NoError(t, err) + + ltd.OrigBytes, err = ioutil.ReadAll(f) + require.NoError(t, err) + require.NotEmpty(t, ltd.OrigBytes) + + // generate a unixfs dag using the given dagService to get the root. + root := unixfs.WriteUnixfsDAGTo(t, src, dagService) + + // Create a UnixFS DAG again AND generate a CARv2 file that can be used to back a filestore. + path := genRefCARv2(t, src, root) + return cidlink.Link{Cid: root}, path +} + +// VerifyFileTransferred checks that the fixture file was sent from one node to the other. 
+func (ltd *Libp2pTestData) VerifyFileTransferred(t *testing.T, link ipld.Link, useSecondNode bool, readLen uint64) { + var dagService ipldformat.DAGService + if useSecondNode { + dagService = ltd.DagService2 + } else { + dagService = ltd.DagService1 + } + ltd.verifyFileTransferred(t, link, dagService, readLen) +} + +// VerifyFileTransferredIntoStore checks that the fixture file was sent from +// one node to the other, and stored in the given CAR file +func (ltd *Libp2pTestData) VerifyFileTransferredIntoStore(t *testing.T, link ipld.Link, bs bstore.Blockstore, readLen uint64) { + bsvc := blockservice.New(bs, offline.Exchange(bs)) + dagService := merkledag.NewDAGService(bsvc) + ltd.verifyFileTransferred(t, link, dagService, readLen) +} + +func (ltd *Libp2pTestData) verifyFileTransferred(t *testing.T, link ipld.Link, dagService ipldformat.DAGService, readLen uint64) { + c := link.(cidlink.Link).Cid + + // load the root of the UnixFS DAG from the new blockstore + otherNode, err := dagService.Get(ltd.Ctx, c) + require.NoError(t, err) + + // Setup a UnixFS file reader + n, err := unixfile.NewUnixfsFile(ltd.Ctx, dagService, otherNode) + require.NoError(t, err) + + fn, ok := n.(files.File) + require.True(t, ok) + + // Read the bytes for the UnixFS File + finalBytes := make([]byte, readLen) + _, err = fn.Read(finalBytes) + if err != nil { + require.Equal(t, "EOF", err.Error()) + } + + // verify original bytes match final bytes! 
+ require.EqualValues(t, ltd.OrigBytes[:readLen], finalBytes) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/startandwait.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/startandwait.go new file mode 100644 index 00000000000..970ab22d0a5 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/startandwait.go @@ -0,0 +1,31 @@ +package shared_testutil + +import ( + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +// StartAndWaitable is any interface that can be started up and will be asynchronously ready later +type StartAndWaitable interface { + Start(ctx context.Context) error + OnReady(shared.ReadyFunc) +} + +// StartAndWaitForReady is a utility function to start a module and verify it reaches the ready state +func StartAndWaitForReady(ctx context.Context, t *testing.T, startAndWaitable StartAndWaitable) { + ready := make(chan error, 1) + startAndWaitable.OnReady(func(err error) { + ready <- err + }) + require.NoError(t, startAndWaitable.Start(ctx)) + select { + case <-ctx.Done(): + t.Fatal("did not finish starting up module") + case err := <-ready: + require.NoError(t, err) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_blockstoreaccessor.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_blockstoreaccessor.go new file mode 100644 index 00000000000..cf75f4ab771 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_blockstoreaccessor.go @@ -0,0 +1,49 @@ +package shared_testutil + +import ( + "github.com/ipfs/go-datastore" + bstore "github.com/ipfs/go-ipfs-blockstore" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +type TestStorageBlockstoreAccessor struct { + Blockstore bstore.Blockstore +} + +var _ storagemarket.BlockstoreAccessor = 
(*TestStorageBlockstoreAccessor)(nil) + +func (t *TestStorageBlockstoreAccessor) Get(storagemarket.PayloadCID) (bstore.Blockstore, error) { + return t.Blockstore, nil +} + +func (t *TestStorageBlockstoreAccessor) Done(storagemarket.PayloadCID) error { + return nil +} + +func NewTestStorageBlockstoreAccessor() *TestStorageBlockstoreAccessor { + return &TestStorageBlockstoreAccessor{ + Blockstore: bstore.NewBlockstore(datastore.NewMapDatastore()), + } +} + +type TestRetrievalBlockstoreAccessor struct { + Blockstore bstore.Blockstore +} + +var _ retrievalmarket.BlockstoreAccessor = (*TestRetrievalBlockstoreAccessor)(nil) + +func (t *TestRetrievalBlockstoreAccessor) Get(retrievalmarket.DealID, retrievalmarket.PayloadCID) (bstore.Blockstore, error) { + return t.Blockstore, nil +} + +func (t *TestRetrievalBlockstoreAccessor) Done(retrievalmarket.DealID) error { + return nil +} + +func NewTestRetrievalBlockstoreAccessor() *TestRetrievalBlockstoreAccessor { + return &TestRetrievalBlockstoreAccessor{ + Blockstore: bstore.NewBlockstore(datastore.NewMapDatastore()), + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_datatransfer.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_datatransfer.go new file mode 100644 index 00000000000..0a031be3ede --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_datatransfer.go @@ -0,0 +1,138 @@ +package shared_testutil + +import ( + "context" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + + datatransfer "github.com/filecoin-project/go-data-transfer" +) + +// RegisteredRevalidator records a voucher type that was registered for revalidations +type RegisteredRevalidator struct { + VoucherType datatransfer.Voucher + Revalidator datatransfer.Revalidator +} + +// RegisteredVoucherType records a voucher typed that was registered +type RegisteredVoucherType struct { + VoucherType datatransfer.Voucher + Validator 
datatransfer.RequestValidator
+}
+
+// RegisteredTransportConfigurer records a transport configurer registered for a voucher type
+type RegisteredTransportConfigurer struct {
+	VoucherType datatransfer.Voucher
+	Configurer  datatransfer.TransportConfigurer
+}
+
+// TestDataTransfer is a mock implementation of the data transfer library.
+// Most of its functions have no effect; the registration methods and
+// SubscribeToEvents simply record their arguments for test inspection.
+type TestDataTransfer struct {
+	RegisteredRevalidators         []RegisteredRevalidator
+	RegisteredVoucherTypes         []RegisteredVoucherType
+	RegisteredVoucherResultTypes   []datatransfer.VoucherResult
+	RegisteredTransportConfigurers []RegisteredTransportConfigurer
+	Subscribers                    []datatransfer.Subscriber
+}
+
+// NewTestDataTransfer returns a new test interface implementation of datatransfer.Manager
+func NewTestDataTransfer() *TestDataTransfer {
+	return &TestDataTransfer{}
+}
+
+// Start does nothing
+func (tdt *TestDataTransfer) Start(ctx context.Context) error {
+	return nil
+}
+
+// Stop does nothing
+func (tdt *TestDataTransfer) Stop(context.Context) error {
+	return nil
+}
+
+// RegisterVoucherType records the registered voucher type
+func (tdt *TestDataTransfer) RegisterVoucherType(voucherType datatransfer.Voucher, validator datatransfer.RequestValidator) error {
+	tdt.RegisteredVoucherTypes = append(tdt.RegisteredVoucherTypes, RegisteredVoucherType{voucherType, validator})
+	return nil
+}
+
+// RegisterRevalidator records the registered revalidator type
+func (tdt *TestDataTransfer) RegisterRevalidator(voucherType datatransfer.Voucher, revalidator datatransfer.Revalidator) error {
+	tdt.RegisteredRevalidators = append(tdt.RegisteredRevalidators, RegisteredRevalidator{voucherType, revalidator})
+	return nil
+}
+
+// RegisterVoucherResultType records the registered result type
+func (tdt *TestDataTransfer) RegisterVoucherResultType(resultType datatransfer.VoucherResult) error {
+	tdt.RegisteredVoucherResultTypes = append(tdt.RegisteredVoucherResultTypes, resultType)
+	return nil
+}
+
+// 
RegisterTransportConfigurer records the registered transport configurer +func (tdt *TestDataTransfer) RegisterTransportConfigurer(voucherType datatransfer.Voucher, configurer datatransfer.TransportConfigurer) error { + tdt.RegisteredTransportConfigurers = append(tdt.RegisteredTransportConfigurers, RegisteredTransportConfigurer{voucherType, configurer}) + return nil +} + +// OpenPushDataChannel does nothing +func (tdt *TestDataTransfer) OpenPushDataChannel(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, error) { + return datatransfer.ChannelID{}, nil +} + +func (tdt *TestDataTransfer) RestartDataTransferChannel(ctx context.Context, chId datatransfer.ChannelID) error { + return nil +} + +// OpenPullDataChannel does nothing +func (tdt *TestDataTransfer) OpenPullDataChannel(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, error) { + return datatransfer.ChannelID{}, nil +} + +// SendVoucher does nothing +func (tdt *TestDataTransfer) SendVoucher(ctx context.Context, chid datatransfer.ChannelID, voucher datatransfer.Voucher) error { + return nil +} + +// CloseDataTransferChannel does nothing +func (tdt *TestDataTransfer) CloseDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error { + return nil +} + +// PauseDataTransferChannel does nothing +func (tdt *TestDataTransfer) PauseDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error { + return nil +} + +// ResumeDataTransferChannel does nothing +func (tdt *TestDataTransfer) ResumeDataTransferChannel(ctx context.Context, chid datatransfer.ChannelID) error { + return nil +} + +// TransferChannelStatus returns ChannelNotFoundError +func (tdt *TestDataTransfer) TransferChannelStatus(ctx context.Context, x datatransfer.ChannelID) datatransfer.Status { + return datatransfer.ChannelNotFoundError +} + +func (tdt *TestDataTransfer) 
ChannelState(ctx context.Context, chid datatransfer.ChannelID) (datatransfer.ChannelState, error) { + return nil, nil +} + +// SubscribeToEvents records subscribers +func (tdt *TestDataTransfer) SubscribeToEvents(subscriber datatransfer.Subscriber) datatransfer.Unsubscribe { + tdt.Subscribers = append(tdt.Subscribers, subscriber) + return func() {} +} + +// InProgressChannels returns empty +func (tdt *TestDataTransfer) InProgressChannels(ctx context.Context) (map[datatransfer.ChannelID]datatransfer.ChannelState, error) { + return map[datatransfer.ChannelID]datatransfer.ChannelState{}, nil +} + +func (tdt *TestDataTransfer) OnReady(f datatransfer.ReadyFunc) { + +} + +var _ datatransfer.Manager = new(TestDataTransfer) diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_deal_funds.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_deal_funds.go new file mode 100644 index 00000000000..853dcb65f17 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_deal_funds.go @@ -0,0 +1,34 @@ +package shared_testutil + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" +) + +func NewTestDealFunds() *TestDealFunds { + return &TestDealFunds{ + reserved: big.Zero(), + } +} + +type TestDealFunds struct { + reserved abi.TokenAmount + ReserveCalls []abi.TokenAmount + ReleaseCalls []abi.TokenAmount +} + +func (f *TestDealFunds) Get() abi.TokenAmount { + return f.reserved +} + +func (f *TestDealFunds) Reserve(amount abi.TokenAmount) (abi.TokenAmount, error) { + f.reserved = big.Add(f.reserved, amount) + f.ReserveCalls = append(f.ReserveCalls, amount) + return f.reserved, nil +} + +func (f *TestDealFunds) Release(amount abi.TokenAmount) (abi.TokenAmount, error) { + f.reserved = big.Sub(f.reserved, amount) + f.ReleaseCalls = append(f.ReleaseCalls, amount) + return f.reserved, nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_filestore.go 
b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_filestore.go new file mode 100644 index 00000000000..35627487639 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_filestore.go @@ -0,0 +1,176 @@ +package shared_testutil + +import ( + "bytes" + "errors" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-fil-markets/filestore" +) + +var TestErrNotFound = errors.New("file not found") +var TestErrTempFile = errors.New("temp file creation failed") + +// TestFileStoreParams are parameters for a test file store +type TestFileStoreParams struct { + Files []filestore.File + AvailableTempFiles []filestore.File + ExpectedDeletions []filestore.Path + ExpectedOpens []filestore.Path +} + +// TestFileStore is a mocked file store that can provide programmed returns +// and test expectations +type TestFileStore struct { + files []filestore.File + availableTempFiles []filestore.File + expectedDeletions map[filestore.Path]struct{} + expectedOpens map[filestore.Path]struct{} + deletedFiles map[filestore.Path]struct{} + openedFiles map[filestore.Path]struct{} +} + +// NewTestFileStore returns a new test file store from the given parameters +func NewTestFileStore(params TestFileStoreParams) *TestFileStore { + fs := &TestFileStore{ + files: params.Files, + availableTempFiles: params.AvailableTempFiles, + expectedDeletions: make(map[filestore.Path]struct{}), + expectedOpens: make(map[filestore.Path]struct{}), + deletedFiles: make(map[filestore.Path]struct{}), + openedFiles: make(map[filestore.Path]struct{}), + } + for _, path := range params.ExpectedDeletions { + fs.expectedDeletions[path] = struct{}{} + } + for _, path := range params.ExpectedOpens { + fs.expectedOpens[path] = struct{}{} + } + return fs +} + +// Open will open a file if it's in the file store +func (fs *TestFileStore) Open(p filestore.Path) (filestore.File, error) { + var foundFile filestore.File + for _, file := range fs.files 
{ + if p == file.Path() { + foundFile = file + break + } + } + if foundFile == nil { + return nil, TestErrNotFound + } + fs.openedFiles[p] = struct{}{} + return foundFile, nil +} + +// Create is not implement +func (fs *TestFileStore) Create(p filestore.Path) (filestore.File, error) { + panic("not implemented") +} + +// Store is not implemented +func (fs *TestFileStore) Store(p filestore.Path, f filestore.File) (filestore.Path, error) { + panic("not implemented") +} + +// Delete will delete a file if it is in the file store +func (fs *TestFileStore) Delete(p filestore.Path) error { + var foundFile filestore.File + for i, file := range fs.files { + if p == file.Path() { + foundFile = file + fs.files[i] = fs.files[len(fs.files)-1] + fs.files[len(fs.files)-1] = nil + fs.files = fs.files[:len(fs.files)-1] + break + } + } + if foundFile == nil { + return TestErrNotFound + } + fs.deletedFiles[p] = struct{}{} + return nil +} + +// CreateTemp will create a temporary file from the provided set of temporary files +func (fs *TestFileStore) CreateTemp() (filestore.File, error) { + if len(fs.availableTempFiles) == 0 { + return nil, TestErrTempFile + } + var tempFile filestore.File + tempFile, fs.availableTempFiles = fs.availableTempFiles[0], fs.availableTempFiles[1:] + fs.files = append(fs.files, tempFile) + return tempFile, nil +} + +// VerifyExpectations will verify that the correct files were opened and deleted +func (fs *TestFileStore) VerifyExpectations(t *testing.T) { + require.Equal(t, fs.openedFiles, fs.expectedOpens) + require.Equal(t, fs.deletedFiles, fs.expectedDeletions) +} + +// TestFileParams are parameters for a test file +type TestFileParams struct { + Buffer *bytes.Buffer + Size int64 + Path filestore.Path +} + +// NewTestFile generates a mocked filestore.File that has programmed returns +func NewTestFile(params TestFileParams) *TestFile { + tf := &TestFile{ + Buffer: params.Buffer, + size: params.Size, + path: params.Path, + } + if tf.Buffer == nil { + 
tf.Buffer = new(bytes.Buffer) + } + if tf.size == 0 { + tf.size = rand.Int63() + } + if tf.path == filestore.Path("") { + buf := make([]byte, 16) + _, _ = rand.Read(buf) + tf.path = filestore.Path(buf) + } + return tf +} + +// TestFile is a mocked version of filestore.File with preset returns +// and a byte buffer for read/writes +type TestFile struct { + *bytes.Buffer + size int64 + path filestore.Path +} + +// Path returns the preset path +func (f *TestFile) Path() filestore.Path { + return f.path +} + +// OsPath is not implemented +func (f *TestFile) OsPath() filestore.OsPath { + return filestore.OsPath(f.path) +} + +// Size returns the preset size +func (f *TestFile) Size() int64 { + return f.size +} + +// Close does nothing +func (f *TestFile) Close() error { + return nil +} + +// Seek is not implemented +func (f *TestFile) Seek(offset int64, whence int) (int64, error) { + panic("not implemented") +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_ipld_tree.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_ipld_tree.go new file mode 100644 index 00000000000..eb59f6a496f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_ipld_tree.go @@ -0,0 +1,137 @@ +package shared_testutil + +import ( + "bytes" + "context" + "errors" + "io" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipld/go-car" + "github.com/ipld/go-ipld-prime" + + // to register multicodec + _ "github.com/ipld/go-ipld-prime/codec/dagjson" + "github.com/ipld/go-ipld-prime/fluent" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" +) + +// TestIPLDTree is a set of IPLD Data that forms a tree spread across some blocks +// with a serialized in memory representation +type TestIPLDTree struct { + Storage map[ipld.Link][]byte + LeafAlpha ipld.Node + LeafAlphaLnk ipld.Link + 
LeafAlphaBlock blocks.Block + LeafBeta ipld.Node + LeafBetaLnk ipld.Link + LeafBetaBlock blocks.Block + MiddleMapNode ipld.Node + MiddleMapNodeLnk ipld.Link + MiddleMapBlock blocks.Block + MiddleListNode ipld.Node + MiddleListNodeLnk ipld.Link + MiddleListBlock blocks.Block + RootNode ipld.Node + RootNodeLnk ipld.Link + RootBlock blocks.Block +} + +// NewTestIPLDTree returns a fake tree of nodes, spread across 5 blocks +func NewTestIPLDTree() TestIPLDTree { + var storage = make(map[ipld.Link][]byte) + encode := func(n ipld.Node) (ipld.Node, ipld.Link) { + lb := cidlink.LinkPrototype{Prefix: cid.Prefix{ + Version: 1, + Codec: 0x0129, + MhType: 0x13, + MhLength: 4, + }} + lsys := cidlink.DefaultLinkSystem() + lsys.StorageWriteOpener = func(ipld.LinkContext) (io.Writer, ipld.BlockWriteCommitter, error) { + buf := bytes.Buffer{} + return &buf, func(lnk ipld.Link) error { + storage[lnk] = buf.Bytes() + return nil + }, nil + } + lnk, err := lsys.Store(ipld.LinkContext{}, lb, n) + if err != nil { + panic(err) + } + return n, lnk + } + + var ( + leafAlpha, leafAlphaLnk = encode(fluent.MustBuild(basicnode.Prototype.String, func(na fluent.NodeAssembler) { na.AssignString("alpha") })) + leafAlphaBlock, _ = blocks.NewBlockWithCid(storage[leafAlphaLnk], leafAlphaLnk.(cidlink.Link).Cid) + leafBeta, leafBetaLnk = encode(fluent.MustBuild(basicnode.Prototype.String, func(na fluent.NodeAssembler) { na.AssignString("beta") })) + leafBetaBlock, _ = blocks.NewBlockWithCid(storage[leafBetaLnk], leafBetaLnk.(cidlink.Link).Cid) + middleMapNode, middleMapNodeLnk = encode(fluent.MustBuildMap(basicnode.Prototype.Map, 3, func(ma fluent.MapAssembler) { + ma.AssembleEntry("foo").AssignBool(true) + ma.AssembleEntry("bar").AssignBool(false) + ma.AssembleEntry("nested").CreateMap(2, func(ma fluent.MapAssembler) { + ma.AssembleEntry("alink").AssignLink(leafAlphaLnk) + ma.AssembleEntry("nonlink").AssignString("zoo") + }) + })) + middleMapBlock, _ = blocks.NewBlockWithCid(storage[middleMapNodeLnk], 
middleMapNodeLnk.(cidlink.Link).Cid) + middleListNode, middleListNodeLnk = encode(fluent.MustBuildList(basicnode.Prototype.List, 4, func(la fluent.ListAssembler) { + la.AssembleValue().AssignLink(leafAlphaLnk) + la.AssembleValue().AssignLink(leafAlphaLnk) + la.AssembleValue().AssignLink(leafBetaLnk) + la.AssembleValue().AssignLink(leafAlphaLnk) + })) + middleListBlock, _ = blocks.NewBlockWithCid(storage[middleListNodeLnk], middleListNodeLnk.(cidlink.Link).Cid) + rootNode, rootNodeLnk = encode(fluent.MustBuildMap(basicnode.Prototype.Map, 4, func(ma fluent.MapAssembler) { + ma.AssembleEntry("plain").AssignString("olde string") + ma.AssembleEntry("linkedString").AssignLink(leafAlphaLnk) + ma.AssembleEntry("linkedMap").AssignLink(middleMapNodeLnk) + ma.AssembleEntry("linkedList").AssignLink(middleListNodeLnk) + })) + rootBlock, _ = blocks.NewBlockWithCid(storage[rootNodeLnk], rootNodeLnk.(cidlink.Link).Cid) + ) + return TestIPLDTree{ + Storage: storage, + LeafAlpha: leafAlpha, + LeafAlphaLnk: leafAlphaLnk, + LeafAlphaBlock: leafAlphaBlock, + LeafBeta: leafBeta, + LeafBetaLnk: leafBetaLnk, + LeafBetaBlock: leafBetaBlock, + MiddleMapNode: middleMapNode, + MiddleMapNodeLnk: middleMapNodeLnk, + MiddleMapBlock: middleMapBlock, + MiddleListNode: middleListNode, + MiddleListNodeLnk: middleListNodeLnk, + MiddleListBlock: middleListBlock, + RootNode: rootNode, + RootNodeLnk: rootNodeLnk, + RootBlock: rootBlock, + } +} + +// Get makes a test tree behave like a block read store +func (tt TestIPLDTree) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + data, ok := tt.Storage[cidlink.Link{Cid: c}] + if !ok { + return nil, errors.New("No block found") + } + return blocks.NewBlockWithCid(data, c) +} + +// DumpToCar puts the tree into a car file, with user configured functions +func (tt TestIPLDTree) DumpToCar(out io.Writer, userOnNewCarBlocks ...car.OnNewCarBlockFunc) error { + ctx := context.Background() + sc := car.NewSelectiveCar(ctx, tt, []car.Dag{ + { + Root: 
tt.RootNodeLnk.(cidlink.Link).Cid, + Selector: selectorparse.CommonSelector_ExploreAllRecursively, + }, + }) + + return sc.Write(out, userOnNewCarBlocks...) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_network_types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_network_types.go new file mode 100644 index 00000000000..89d0de03990 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_network_types.go @@ -0,0 +1,575 @@ +package shared_testutil + +import ( + "errors" + "testing" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-fil-markets/discovery" + rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +// QueryReader is a function to mock reading queries. +type QueryReader func() (rm.Query, error) + +// QueryResponseReader is a function to mock reading query responses. +type QueryResponseReader func() (rm.QueryResponse, error) + +// QueryResponseWriter is a function to mock writing query responses. +type QueryResponseWriter func(rm.QueryResponse) error + +// QueryWriter is a function to mock writing queries. +type QueryWriter func(rm.Query) error + +// TestRetrievalQueryStream is a retrieval query stream with predefined +// stubbed behavior. +type TestRetrievalQueryStream struct { + p peer.ID + reader QueryReader + respReader QueryResponseReader + respWriter QueryResponseWriter + writer QueryWriter +} + +// TestQueryStreamParams are parameters used to setup a TestRetrievalQueryStream. +// All parameters except the peer ID are optional. 
+type TestQueryStreamParams struct { + PeerID peer.ID + Reader QueryReader + RespReader QueryResponseReader + RespWriter QueryResponseWriter + Writer QueryWriter +} + +// NewTestRetrievalQueryStream returns a new TestRetrievalQueryStream with the +// behavior specified by the paramaters, or default behaviors if not specified. +func NewTestRetrievalQueryStream(params TestQueryStreamParams) *TestRetrievalQueryStream { + stream := TestRetrievalQueryStream{ + p: params.PeerID, + reader: TrivialQueryReader, + respReader: TrivialQueryResponseReader, + respWriter: TrivialQueryResponseWriter, + writer: TrivialQueryWriter, + } + if params.Reader != nil { + stream.reader = params.Reader + } + if params.Writer != nil { + stream.writer = params.Writer + } + if params.RespReader != nil { + stream.respReader = params.RespReader + } + if params.RespWriter != nil { + stream.respWriter = params.RespWriter + } + return &stream +} + +func (trqs *TestRetrievalQueryStream) SetRemotePeer(rp peer.ID) { + trqs.p = rp +} + +func (trqs *TestRetrievalQueryStream) RemotePeer() peer.ID { + return trqs.p +} + +// ReadDealStatusRequest calls the mocked query reader. +func (trqs *TestRetrievalQueryStream) ReadQuery() (rm.Query, error) { + return trqs.reader() +} + +// WriteDealStatusRequest calls the mocked query writer. +func (trqs *TestRetrievalQueryStream) WriteQuery(newQuery rm.Query) error { + return trqs.writer(newQuery) +} + +// ReadDealStatusResponse calls the mocked query response reader. +func (trqs *TestRetrievalQueryStream) ReadQueryResponse() (rm.QueryResponse, error) { + return trqs.respReader() +} + +// WriteDealStatusResponse calls the mocked query response writer. +func (trqs *TestRetrievalQueryStream) WriteQueryResponse(newResp rm.QueryResponse) error { + return trqs.respWriter(newResp) +} + +// Close closes the stream (does nothing for test). 
+func (trqs *TestRetrievalQueryStream) Close() error { return nil } + +// DealProposalReader is a function to mock reading deal proposals. +type DealProposalReader func() (rm.DealProposal, error) + +// DealResponseReader is a function to mock reading deal responses. +type DealResponseReader func() (rm.DealResponse, error) + +// DealResponseWriter is a function to mock writing deal responses. +type DealResponseWriter func(rm.DealResponse) error + +// DealProposalWriter is a function to mock writing deal proposals. +type DealProposalWriter func(rm.DealProposal) error + +// DealPaymentReader is a function to mock reading deal payments. +type DealPaymentReader func() (rm.DealPayment, error) + +// DealPaymentWriter is a function to mock writing deal payments. +type DealPaymentWriter func(rm.DealPayment) error + +// TestRetrievalDealStream is a retrieval deal stream with predefined +// stubbed behavior. +type TestRetrievalDealStream struct { + p peer.ID + proposalReader DealProposalReader + proposalWriter DealProposalWriter + responseReader DealResponseReader + responseWriter DealResponseWriter + paymentReader DealPaymentReader + paymentWriter DealPaymentWriter +} + +// TestDealStreamParams are parameters used to setup a TestRetrievalDealStream. +// All parameters except the peer ID are optional. +type TestDealStreamParams struct { + PeerID peer.ID + ProposalReader DealProposalReader + ProposalWriter DealProposalWriter + ResponseReader DealResponseReader + ResponseWriter DealResponseWriter + PaymentReader DealPaymentReader + PaymentWriter DealPaymentWriter +} + +// QueryStreamBuilder is a function that builds retrieval query streams. 
+type QueryStreamBuilder func(peer.ID) (rmnet.RetrievalQueryStream, error) + +// TestRetrievalMarketNetwork is a test network that has stubbed behavior +// for testing the retrieval market implementation +type TestRetrievalMarketNetwork struct { + receiver rmnet.RetrievalReceiver + qsbuilder QueryStreamBuilder +} + +// TestNetworkParams are parameters for setting up a test network. All +// parameters other than the receiver are optional +type TestNetworkParams struct { + QueryStreamBuilder QueryStreamBuilder + Receiver rmnet.RetrievalReceiver +} + +// NewTestRetrievalMarketNetwork returns a new TestRetrievalMarketNetwork with the +// behavior specified by the paramaters, or default behaviors if not specified. +func NewTestRetrievalMarketNetwork(params TestNetworkParams) *TestRetrievalMarketNetwork { + trmn := TestRetrievalMarketNetwork{ + qsbuilder: TrivialNewQueryStream, + receiver: params.Receiver, + } + + if params.QueryStreamBuilder != nil { + trmn.qsbuilder = params.QueryStreamBuilder + } + return &trmn +} + +// NewDealStatusStream returns a query stream. +// Note this always returns the same stream. This is fine for testing for now. 
+func (trmn *TestRetrievalMarketNetwork) NewQueryStream(id peer.ID) (rmnet.RetrievalQueryStream, error) { + return trmn.qsbuilder(id) +} + +// SetDelegate sets the market receiver +func (trmn *TestRetrievalMarketNetwork) SetDelegate(r rmnet.RetrievalReceiver) error { + trmn.receiver = r + return nil +} + +// ReceiveQueryStream simulates receiving a query stream +func (trmn *TestRetrievalMarketNetwork) ReceiveQueryStream(qs rmnet.RetrievalQueryStream) { + trmn.receiver.HandleQueryStream(qs) +} + +// StopHandlingRequests sets receiver to nil +func (trmn *TestRetrievalMarketNetwork) StopHandlingRequests() error { + trmn.receiver = nil + return nil +} + +// ID returns the peer id of this host (empty peer ID in test) +func (trmn *TestRetrievalMarketNetwork) ID() peer.ID { + return peer.ID("") +} + +// AddAddrs does nothing in test +func (trmn *TestRetrievalMarketNetwork) AddAddrs(peer.ID, []ma.Multiaddr) { +} + +var _ rmnet.RetrievalMarketNetwork = &TestRetrievalMarketNetwork{} + +// Some convenience builders + +// FailNewQueryStream always fails +func FailNewQueryStream(peer.ID) (rmnet.RetrievalQueryStream, error) { + return nil, errors.New("new query stream failed") +} + +// FailQueryReader always fails +func FailQueryReader() (rm.Query, error) { + return rm.QueryUndefined, errors.New("read query failed") +} + +// FailQueryWriter always fails +func FailQueryWriter(rm.Query) error { + return errors.New("write query failed") +} + +// FailResponseReader always fails +func FailResponseReader() (rm.QueryResponse, error) { + return rm.QueryResponseUndefined, errors.New("query response failed") +} + +// FailResponseWriter always fails +func FailResponseWriter(rm.QueryResponse) error { + return errors.New("write query response failed") +} + +// FailDealProposalWriter always fails +func FailDealProposalWriter(rm.DealProposal) error { + return errors.New("write proposal failed") +} + +// FailDealProposalReader always fails +func FailDealProposalReader() (rm.DealProposal, error) 
{ + return rm.DealProposalUndefined, errors.New("read proposal failed") +} + +// FailDealResponseWriter always fails +func FailDealResponseWriter(rm.DealResponse) error { + return errors.New("write proposal failed") +} + +// FailDealResponseReader always fails +func FailDealResponseReader() (rm.DealResponse, error) { + return rm.DealResponseUndefined, errors.New("write proposal failed") +} + +// FailDealPaymentWriter always fails +func FailDealPaymentWriter(rm.DealPayment) error { + return errors.New("write proposal failed") +} + +// FailDealPaymentReader always fails +func FailDealPaymentReader() (rm.DealPayment, error) { + return rm.DealPaymentUndefined, errors.New("write proposal failed") +} + +// TrivialNewQueryStream succeeds trivially, returning an empty query stream. +func TrivialNewQueryStream(p peer.ID) (rmnet.RetrievalQueryStream, error) { + return NewTestRetrievalQueryStream(TestQueryStreamParams{PeerID: p}), nil +} + +// ExpectPeerOnQueryStreamBuilder fails if the peer used does not match the expected peer +func ExpectPeerOnQueryStreamBuilder(t *testing.T, expectedPeer peer.ID, qb QueryStreamBuilder, msgAndArgs ...interface{}) QueryStreamBuilder { + return func(p peer.ID) (rmnet.RetrievalQueryStream, error) { + require.Equal(t, expectedPeer, p, msgAndArgs...) + return qb(p) + } +} + +// TrivialQueryReader succeeds trivially, returning an empty query. +func TrivialQueryReader() (rm.Query, error) { + return rm.Query{}, nil +} + +// TrivialQueryResponseReader succeeds trivially, returning an empty query response. +func TrivialQueryResponseReader() (rm.QueryResponse, error) { + return rm.QueryResponse{}, nil +} + +// TrivialQueryWriter succeeds trivially, returning no error. +func TrivialQueryWriter(rm.Query) error { + return nil +} + +// TrivialQueryResponseWriter succeeds trivially, returning no error. 
+func TrivialQueryResponseWriter(rm.QueryResponse) error { + return nil +} + +// StubbedQueryReader returns the given query when called +func StubbedQueryReader(query rm.Query) QueryReader { + return func() (rm.Query, error) { + return query, nil + } +} + +// StubbedQueryResponseReader returns the given query response when called +func StubbedQueryResponseReader(queryResponse rm.QueryResponse) QueryResponseReader { + return func() (rm.QueryResponse, error) { + return queryResponse, nil + } +} + +// ExpectQueryWriter will fail if the written query and expected query don't match +func ExpectQueryWriter(t *testing.T, expectedQuery rm.Query, msgAndArgs ...interface{}) QueryWriter { + return func(query rm.Query) error { + require.Equal(t, expectedQuery, query, msgAndArgs...) + return nil + } +} + +// ExpectQueryResponseWriter will fail if the written query response and expected query response don't match +func ExpectQueryResponseWriter(t *testing.T, expectedQueryResponse rm.QueryResponse, msgAndArgs ...interface{}) QueryResponseWriter { + return func(queryResponse rm.QueryResponse) error { + require.Equal(t, expectedQueryResponse, queryResponse, msgAndArgs...) + return nil + } +} + +// ExpectDealResponseWriter will fail if the written query and expected query don't match +func ExpectDealResponseWriter(t *testing.T, expectedDealResponse rm.DealResponse, msgAndArgs ...interface{}) DealResponseWriter { + return func(dealResponse rm.DealResponse) error { + require.Equal(t, expectedDealResponse, dealResponse, msgAndArgs...) 
+ return nil + } +} + +// QueryReadWriter will read only if something is written, otherwise it errors +func QueryReadWriter() (QueryReader, QueryWriter) { + var q rm.Query + var written bool + queryRead := func() (rm.Query, error) { + if written { + return q, nil + } + return rm.QueryUndefined, errors.New("Unable to read value") + } + queryWrite := func(wq rm.Query) error { + q = wq + written = true + return nil + } + return queryRead, queryWrite +} + +// QueryResponseReadWriter will read only if something is written, otherwise it errors +func QueryResponseReadWriter() (QueryResponseReader, QueryResponseWriter) { + var q rm.QueryResponse + var written bool + queryResponseRead := func() (rm.QueryResponse, error) { + if written { + return q, nil + } + return rm.QueryResponseUndefined, errors.New("Unable to read value") + } + queryResponseWrite := func(wq rm.QueryResponse) error { + q = wq + written = true + return nil + } + return queryResponseRead, queryResponseWrite +} + +// StubbedDealProposalReader returns the given proposal when called +func StubbedDealProposalReader(proposal rm.DealProposal) DealProposalReader { + return func() (rm.DealProposal, error) { + return proposal, nil + } +} + +// StubbedDealResponseReader returns the given deal response when called +func StubbedDealResponseReader(response rm.DealResponse) DealResponseReader { + return func() (rm.DealResponse, error) { + return response, nil + } +} + +// StubbedDealPaymentReader returns the given deal payment when called +func StubbedDealPaymentReader(payment rm.DealPayment) DealPaymentReader { + return func() (rm.DealPayment, error) { + return payment, nil + } +} + +// StorageDealProposalReader is a function to mock reading deal proposals. +type StorageDealProposalReader func() (smnet.Proposal, error) + +// StorageDealResponseReader is a function to mock reading deal responses. 
+type StorageDealResponseReader func() (smnet.SignedResponse, []byte, error)
+
+// StorageDealResponseWriter is a function to mock writing deal responses.
+type StorageDealResponseWriter func(smnet.SignedResponse, smnet.ResigningFunc) error
+
+// StorageDealProposalWriter is a function to mock writing deal proposals.
+type StorageDealProposalWriter func(smnet.Proposal) error
+
+// TestStorageDealStream is a storage deal stream with predefined
+// stubbed behavior.
+type TestStorageDealStream struct {
+	p              peer.ID
+	proposalReader StorageDealProposalReader
+	proposalWriter StorageDealProposalWriter
+	responseReader StorageDealResponseReader
+	responseWriter StorageDealResponseWriter
+
+	CloseCount int
+	CloseError error
+}
+
+// TestStorageDealStreamParams are parameters used to setup a TestStorageDealStream.
+// All parameters except the peer ID are optional.
+type TestStorageDealStreamParams struct {
+	PeerID         peer.ID
+	ProposalReader StorageDealProposalReader
+	ProposalWriter StorageDealProposalWriter
+	ResponseReader StorageDealResponseReader
+	ResponseWriter StorageDealResponseWriter
+}
+
+var _ smnet.StorageDealStream = &TestStorageDealStream{}
+
+// NewTestStorageDealStream returns a new TestStorageDealStream with the
+// behavior specified by the parameters, or default behaviors if not specified.
+func NewTestStorageDealStream(params TestStorageDealStreamParams) *TestStorageDealStream { + stream := TestStorageDealStream{ + p: params.PeerID, + proposalReader: TrivialStorageDealProposalReader, + proposalWriter: TrivialStorageDealProposalWriter, + responseReader: TrivialStorageDealResponseReader, + responseWriter: TrivialStorageDealResponseWriter, + } + if params.ProposalReader != nil { + stream.proposalReader = params.ProposalReader + } + if params.ProposalWriter != nil { + stream.proposalWriter = params.ProposalWriter + } + if params.ResponseReader != nil { + stream.responseReader = params.ResponseReader + } + if params.ResponseWriter != nil { + stream.responseWriter = params.ResponseWriter + } + return &stream +} + +// ReadDealProposal calls the mocked deal proposal reader function. +func (tsds *TestStorageDealStream) ReadDealProposal() (smnet.Proposal, error) { + return tsds.proposalReader() +} + +// WriteDealProposal calls the mocked deal proposal writer function. +func (tsds *TestStorageDealStream) WriteDealProposal(dealProposal smnet.Proposal) error { + return tsds.proposalWriter(dealProposal) +} + +// ReadDealResponse calls the mocked deal response reader function. +func (tsds *TestStorageDealStream) ReadDealResponse() (smnet.SignedResponse, []byte, error) { + return tsds.responseReader() +} + +// WriteDealResponse calls the mocked deal response writer function. +func (tsds *TestStorageDealStream) WriteDealResponse(dealResponse smnet.SignedResponse, resigningFunc smnet.ResigningFunc) error { + return tsds.responseWriter(dealResponse, resigningFunc) +} + +// RemotePeer returns the other peer +func (tsds TestStorageDealStream) RemotePeer() peer.ID { return tsds.p } + +// Close closes the stream (does nothing for mocked stream) +func (tsds *TestStorageDealStream) Close() error { + tsds.CloseCount += 1 + return tsds.CloseError +} + +// TrivialStorageDealProposalReader succeeds trivially, returning an empty proposal. 
+func TrivialStorageDealProposalReader() (smnet.Proposal, error) { + return smnet.Proposal{}, nil +} + +// TrivialStorageDealResponseReader succeeds trivially, returning an empty deal response. +func TrivialStorageDealResponseReader() (smnet.SignedResponse, []byte, error) { + return MakeTestStorageNetworkSignedResponse(), nil, nil +} + +// TrivialStorageDealProposalWriter succeeds trivially, returning no error. +func TrivialStorageDealProposalWriter(smnet.Proposal) error { + return nil +} + +// TrivialStorageDealResponseWriter succeeds trivially, returning no error. +func TrivialStorageDealResponseWriter(smnet.SignedResponse, smnet.ResigningFunc) error { + return nil +} + +// StubbedStorageProposalReader returns the given proposal when called +func StubbedStorageProposalReader(proposal smnet.Proposal) StorageDealProposalReader { + return func() (smnet.Proposal, error) { + return proposal, nil + } +} + +// StubbedStorageResponseReader returns the given deal response when called +func StubbedStorageResponseReader(response smnet.SignedResponse) StorageDealResponseReader { + return func() (smnet.SignedResponse, []byte, error) { + origBytes, _ := cborutil.Dump(&response.Response) + return response, origBytes, nil + } +} + +// FailStorageProposalWriter always fails +func FailStorageProposalWriter(smnet.Proposal) error { + return errors.New("write proposal failed") +} + +// FailStorageProposalReader always fails +func FailStorageProposalReader() (smnet.Proposal, error) { + return smnet.ProposalUndefined, errors.New("read proposal failed") +} + +// FailStorageResponseWriter always fails +func FailStorageResponseWriter(smnet.SignedResponse) error { + return errors.New("write proposal failed") +} + +// FailStorageResponseReader always fails +func FailStorageResponseReader() (smnet.SignedResponse, []byte, error) { + return smnet.SignedResponseUndefined, nil, errors.New("read response failed") +} + +// TestPeerResolver provides a fake retrievalmarket PeerResolver +type 
TestPeerResolver struct { + Peers []rm.RetrievalPeer + ResolverError error +} + +func (tpr TestPeerResolver) GetPeers(cid.Cid) ([]rm.RetrievalPeer, error) { + return tpr.Peers, tpr.ResolverError +} + +var _ discovery.PeerResolver = &TestPeerResolver{} + +type TestPeerTagger struct { + TagCalls []peer.ID + UntagCalls []peer.ID +} + +func NewTestPeerTagger() *TestPeerTagger { + return &TestPeerTagger{} +} + +func (pt *TestPeerTagger) TagPeer(id peer.ID, _ string) { + pt.TagCalls = append(pt.TagCalls, id) +} + +func (pt *TestPeerTagger) UntagPeer(id peer.ID, _ string) { + pt.UntagCalls = append(pt.UntagCalls, id) +} + +var _ smnet.PeerTagger = &TestPeerTagger{} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_piecestore.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_piecestore.go new file mode 100644 index 00000000000..14be5e0457d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/test_piecestore.go @@ -0,0 +1,159 @@ +package shared_testutil + +import ( + "context" + "errors" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared" +) + +// TestPieceStore is piecestore who's query results are mocked +type TestPieceStore struct { + addPieceBlockLocationsError error + addDealForPieceError error + getPieceInfoError error + piecesStubbed map[cid.Cid]piecestore.PieceInfo + piecesExpected map[cid.Cid]struct{} + piecesReceived map[cid.Cid]struct{} + cidInfosStubbed map[cid.Cid]piecestore.CIDInfo + cidInfosExpected map[cid.Cid]struct{} + cidInfosReceived map[cid.Cid]struct{} +} + +// TestPieceStoreParams sets parameters for a piece store +type TestPieceStoreParams struct { + AddDealForPieceError error + AddPieceBlockLocationsError error + GetPieceInfoError error +} + +var _ piecestore.PieceStore = &TestPieceStore{} + 
+// NewTestPieceStore creates a TestPieceStore +func NewTestPieceStore() *TestPieceStore { + return NewTestPieceStoreWithParams(TestPieceStoreParams{}) +} + +// NewTestPieceStoreWithParams creates a TestPieceStore with the given parameters +func NewTestPieceStoreWithParams(params TestPieceStoreParams) *TestPieceStore { + return &TestPieceStore{ + addDealForPieceError: params.AddDealForPieceError, + addPieceBlockLocationsError: params.AddPieceBlockLocationsError, + getPieceInfoError: params.GetPieceInfoError, + piecesStubbed: make(map[cid.Cid]piecestore.PieceInfo), + piecesExpected: make(map[cid.Cid]struct{}), + piecesReceived: make(map[cid.Cid]struct{}), + cidInfosStubbed: make(map[cid.Cid]piecestore.CIDInfo), + cidInfosExpected: make(map[cid.Cid]struct{}), + cidInfosReceived: make(map[cid.Cid]struct{}), + } +} + +// StubPiece creates a return value for the given piece cid without expecting it +// to be called +func (tps *TestPieceStore) StubPiece(pieceCid cid.Cid, pieceInfo piecestore.PieceInfo) { + tps.piecesStubbed[pieceCid] = pieceInfo +} + +// ExpectPiece records a piece being expected to be queried and return the given piece info +func (tps *TestPieceStore) ExpectPiece(pieceCid cid.Cid, pieceInfo piecestore.PieceInfo) { + tps.piecesExpected[pieceCid] = struct{}{} + tps.StubPiece(pieceCid, pieceInfo) +} + +// ExpectMissingPiece records a piece being expected to be queried and should fail +func (tps *TestPieceStore) ExpectMissingPiece(pieceCid cid.Cid) { + tps.piecesExpected[pieceCid] = struct{}{} +} + +// StubCID creates a return value for the given CID without expecting it +// to be called +func (tps *TestPieceStore) StubCID(c cid.Cid, cidInfo piecestore.CIDInfo) { + tps.cidInfosStubbed[c] = cidInfo +} + +// ExpectCID records a CID being expected to be queried and return the given CID info +func (tps *TestPieceStore) ExpectCID(c cid.Cid, cidInfo piecestore.CIDInfo) { + tps.cidInfosExpected[c] = struct{}{} + tps.StubCID(c, cidInfo) +} + +// ExpectMissingCID 
records a CID being expected to be queried and should fail +func (tps *TestPieceStore) ExpectMissingCID(c cid.Cid) { + tps.cidInfosExpected[c] = struct{}{} +} + +// VerifyExpectations verifies that the piecestore was queried in the expected ways +func (tps *TestPieceStore) VerifyExpectations(t *testing.T) { + require.Equal(t, tps.piecesExpected, tps.piecesReceived) + require.Equal(t, tps.cidInfosExpected, tps.cidInfosReceived) +} + +// AddDealForPiece returns a preprogrammed error +func (tps *TestPieceStore) AddDealForPiece(pieceCID cid.Cid, dealInfo piecestore.DealInfo) error { + return tps.addDealForPieceError +} + +// AddPieceBlockLocations returns a preprogrammed error +func (tps *TestPieceStore) AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]piecestore.BlockLocation) error { + return tps.addPieceBlockLocationsError +} + +func (tps *TestPieceStore) ReturnErrorFromGetPieceInfo(err error) { + tps.getPieceInfoError = err +} + +// GetPieceInfo returns a piece info if it's been stubbed +func (tps *TestPieceStore) GetPieceInfo(pieceCID cid.Cid) (piecestore.PieceInfo, error) { + if tps.getPieceInfoError != nil { + return piecestore.PieceInfoUndefined, tps.getPieceInfoError + } + + tps.piecesReceived[pieceCID] = struct{}{} + + pio, ok := tps.piecesStubbed[pieceCID] + if ok { + return pio, nil + } + _, ok = tps.piecesExpected[pieceCID] + if ok { + return piecestore.PieceInfoUndefined, retrievalmarket.ErrNotFound + } + return piecestore.PieceInfoUndefined, errors.New("GetPieceInfo failed") +} + +// GetCIDInfo returns cid info if it's been stubbed +func (tps *TestPieceStore) GetCIDInfo(c cid.Cid) (piecestore.CIDInfo, error) { + tps.cidInfosReceived[c] = struct{}{} + + cio, ok := tps.cidInfosStubbed[c] + if ok { + return cio, nil + } + _, ok = tps.cidInfosExpected[c] + if ok { + return piecestore.CIDInfoUndefined, retrievalmarket.ErrNotFound + } + return piecestore.CIDInfoUndefined, errors.New("GetCIDInfo failed") +} + +func (tps *TestPieceStore) 
ListCidInfoKeys() ([]cid.Cid, error) { + panic("do not call me") +} + +func (tps *TestPieceStore) ListPieceInfoKeys() ([]cid.Cid, error) { + panic("do not call me") +} + +func (tps *TestPieceStore) Start(ctx context.Context) error { + return nil +} + +func (tps *TestPieceStore) OnReady(ready shared.ReadyFunc) { +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/testchannel.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/testchannel.go new file mode 100644 index 00000000000..5a1d2df30f6 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/testchannel.go @@ -0,0 +1,269 @@ +package shared_testutil + +import ( + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/exp/rand" + + datatransfer "github.com/filecoin-project/go-data-transfer" +) + +// TestChannelParams are params for a new test data transfer channel +type TestChannelParams struct { + TransferID datatransfer.TransferID + BaseCID cid.Cid + Selector ipld.Node + SelfPeer peer.ID + Sender peer.ID + Recipient peer.ID + TotalSize uint64 + IsPull bool + Message string + Sent uint64 + Received uint64 + Queued uint64 + Status datatransfer.Status + Vouchers []datatransfer.Voucher + VoucherResults []datatransfer.VoucherResult + ReceivedCids []cid.Cid +} + +// TestChannel implements a datatransfer channel with set values +type TestChannel struct { + selfPeer peer.ID + transferID datatransfer.TransferID + baseCID cid.Cid + selector ipld.Node + sender peer.ID + recipient peer.ID + totalSize uint64 + message string + isPull bool + sent uint64 + received uint64 + queued uint64 + status datatransfer.Status + vouchers []datatransfer.Voucher + voucherResults []datatransfer.VoucherResult + receivedCids []cid.Cid +} + +// FakeDTType is a fake voucher type +type FakeDTType struct{} + +// Type returns an identifier +func (f FakeDTType) Type() 
datatransfer.TypeIdentifier { return "Fake" } + +// NewTestChannel makes a test channel with default params plus non-zero +// values for TestChannelParams +func NewTestChannel(params TestChannelParams) datatransfer.ChannelState { + peers := GeneratePeers(2) + tc := &TestChannel{ + selfPeer: peers[0], + transferID: datatransfer.TransferID(rand.Uint64()), + baseCID: GenerateCids(1)[0], + selector: selectorparse.CommonSelector_ExploreAllRecursively, + sender: peers[0], + recipient: peers[1], + totalSize: rand.Uint64(), + isPull: params.IsPull, + status: params.Status, + sent: rand.Uint64(), + received: rand.Uint64(), + queued: rand.Uint64(), + vouchers: []datatransfer.Voucher{FakeDTType{}}, + voucherResults: []datatransfer.VoucherResult{FakeDTType{}}, + } + + tc.receivedCids = params.ReceivedCids + + if params.TransferID != 0 { + tc.transferID = params.TransferID + } + if (params.BaseCID != cid.Cid{}) { + tc.baseCID = params.BaseCID + } + if params.Selector != nil { + tc.selector = params.Selector + } + if params.SelfPeer != peer.ID("") { + tc.selfPeer = params.SelfPeer + } + + if params.Sender != peer.ID("") { + tc.sender = params.Sender + } + if params.Recipient != peer.ID("") { + tc.recipient = params.Recipient + } + if params.TotalSize != 0 { + tc.totalSize = params.TotalSize + } + if params.Message != "" { + tc.message = params.Message + } + if params.Vouchers != nil { + tc.vouchers = params.Vouchers + } + if params.VoucherResults != nil { + tc.voucherResults = params.VoucherResults + } + if params.Sent != 0 { + tc.sent = params.Sent + } + if params.Received != 0 { + tc.received = params.Received + } + if params.Queued != 0 { + tc.queued = params.Queued + } + return tc +} + +func (tc *TestChannel) ReceivedCidsLen() int { + return len(tc.receivedCids) +} + +func (tc *TestChannel) ReceivedCidsTotal() int64 { + return int64(len(tc.receivedCids)) +} + +// TransferID returns the transfer id for this channel +func (tc *TestChannel) TransferID() datatransfer.TransferID 
{ + return tc.transferID +} + +// BaseCID returns the CID that is at the root of this data transfer +func (tc *TestChannel) BaseCID() cid.Cid { + return tc.baseCID +} + +// Selector returns the IPLD selector for this data transfer (represented as +// an IPLD node) +func (tc *TestChannel) Selector() ipld.Node { + return tc.selector +} + +// ReceivedCids returns the cids received so far +func (tc *TestChannel) ReceivedCids() []cid.Cid { + return tc.receivedCids +} + +// TODO actual implementation of those +func (tc *TestChannel) MissingCids() []cid.Cid { + return nil +} + +func (tc *TestChannel) QueuedCidsTotal() int64 { + return 0 +} + +func (tc *TestChannel) SentCidsTotal() int64 { + return 0 +} + +// Voucher returns the voucher for this data transfer +func (tc *TestChannel) Voucher() datatransfer.Voucher { + return tc.vouchers[0] +} + +// Sender returns the peer id for the node that is sending data +func (tc *TestChannel) Sender() peer.ID { + return tc.sender +} + +// Recipient returns the peer id for the node that is receiving data +func (tc *TestChannel) Recipient() peer.ID { + return tc.recipient +} + +// TotalSize returns the total size for the data being transferred +func (tc *TestChannel) TotalSize() uint64 { + return tc.totalSize +} + +// IsPull returns whether this is a pull request based on who initiated it +func (tc *TestChannel) IsPull() bool { + return tc.isPull +} + +// ChannelID returns the channel id for this channel +func (tc *TestChannel) ChannelID() datatransfer.ChannelID { + if tc.isPull { + return datatransfer.ChannelID{ID: tc.transferID, Initiator: tc.recipient, Responder: tc.sender} + } else { + return datatransfer.ChannelID{ID: tc.transferID, Initiator: tc.sender, Responder: tc.recipient} + } +} + +// SelfPeer returns the peer this channel belongs to +func (tc *TestChannel) SelfPeer() peer.ID { + return tc.selfPeer +} + +// OtherPeer returns the channel counter party peer +func (tc *TestChannel) OtherPeer() peer.ID { + if tc.selfPeer == 
tc.sender { + return tc.recipient + } + return tc.sender +} + +// OtherParty returns the opposite party in the channel to the passed in party +func (tc *TestChannel) OtherParty(thisParty peer.ID) peer.ID { + if tc.sender == thisParty { + return tc.recipient + } + return tc.sender +} + +// Status is the current status of this channel +func (tc *TestChannel) Status() datatransfer.Status { + return tc.status +} + +// Sent returns the number of bytes sent +func (tc *TestChannel) Sent() uint64 { + return tc.sent +} + +// Received returns the number of bytes received +func (tc *TestChannel) Received() uint64 { + return tc.received +} + +// Received returns the number of bytes received +func (tc *TestChannel) Queued() uint64 { + return tc.queued +} + +// Message offers additional information about the current status +func (tc *TestChannel) Message() string { + return tc.message +} + +// Vouchers returns all vouchers sent on this channel +func (tc *TestChannel) Vouchers() []datatransfer.Voucher { + return tc.vouchers +} + +// VoucherResults are results of vouchers sent on the channel +func (tc *TestChannel) VoucherResults() []datatransfer.VoucherResult { + return tc.voucherResults +} + +// LastVoucher returns the last voucher sent on the channel +func (tc *TestChannel) LastVoucher() datatransfer.Voucher { + return tc.vouchers[len(tc.vouchers)-1] +} + +// LastVoucherResult returns the last voucher result sent on the channel +func (tc *TestChannel) LastVoucherResult() datatransfer.VoucherResult { + return tc.voucherResults[len(tc.voucherResults)-1] +} + +func (tc *TestChannel) Stages() *datatransfer.ChannelStages { + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/testutil.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/testutil.go new file mode 100644 index 00000000000..41a4daf31bf --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/testutil.go @@ -0,0 +1,142 @@ +package shared_testutil + +import ( + "bytes" 
+	"context"
+	"fmt"
+	"testing"
+
+	blocks "github.com/ipfs/go-block-format"
+	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore"
+	"github.com/ipfs/go-datastore/namespace"
+	blocksutil "github.com/ipfs/go-ipfs-blocksutil"
+	"github.com/jbenet/go-random"
+	"github.com/libp2p/go-libp2p-core/peer"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/filecoin-project/go-address"
+	cborutil "github.com/filecoin-project/go-cbor-util"
+	versioning "github.com/filecoin-project/go-ds-versioning/pkg"
+	"github.com/filecoin-project/go-state-types/builtin/v8/paych"
+
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+	"github.com/filecoin-project/go-fil-markets/storagemarket"
+)
+
+var blockGenerator = blocksutil.NewBlockGenerator()
+
+//var prioritySeq int
+var seedSeq int64
+
+// RandomBytes returns a byte array of the given size with random values.
+func RandomBytes(n int64) []byte {
+	data := new(bytes.Buffer)
+	random.WritePseudoRandomBytes(n, data, seedSeq) // nolint: gosec,errcheck
+	seedSeq++
+	return data.Bytes()
+}
+
+// GenerateBlocksOfSize generates a series of blocks of the given byte size
+func GenerateBlocksOfSize(n int, size int64) []blocks.Block {
+	generatedBlocks := make([]blocks.Block, 0, n)
+	for i := 0; i < n; i++ {
+		b := blocks.NewBlock(RandomBytes(size))
+		generatedBlocks = append(generatedBlocks, b)
+
+	}
+	return generatedBlocks
+}
+
+// GenerateCids produces n content identifiers.
+func GenerateCids(n int) []cid.Cid {
+	cids := make([]cid.Cid, 0, n)
+	for i := 0; i < n; i++ {
+		c := blockGenerator.Next().Cid()
+		cids = append(cids, c)
+	}
+	return cids
+}
+
+var peerSeq int
+
+// GeneratePeers creates n peer ids.
+func GeneratePeers(n int) []peer.ID {
+	peerIds := make([]peer.ID, 0, n)
+	for i := 0; i < n; i++ {
+		peerSeq++
+		p := peer.ID(fmt.Sprint(peerSeq))
+		peerIds = append(peerIds, p)
+	}
+	return peerIds
+}
+
+// ContainsPeer returns true if a peer is found in a list of peers.
+func ContainsPeer(peers []peer.ID, p peer.ID) bool { + for _, n := range peers { + if p == n { + return true + } + } + return false +} + +// IndexOf returns the index of a given cid in an array of blocks +func IndexOf(blks []blocks.Block, c cid.Cid) int { + for i, n := range blks { + if n.Cid() == c { + return i + } + } + return -1 +} + +// ContainsBlock returns true if a block is found n a list of blocks +func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { + return IndexOf(blks, block.Cid()) != -1 +} + +// TestVoucherEquality verifies that two vouchers are equal to one another +func TestVoucherEquality(t *testing.T, a, b *paych.SignedVoucher) { + aB, err := cborutil.Dump(a) + require.NoError(t, err) + bB, err := cborutil.Dump(b) + require.NoError(t, err) + require.True(t, bytes.Equal(aB, bB)) +} + +// AssertDealState asserts equality of StorageDealStatus but with better error messaging +func AssertDealState(t *testing.T, expected storagemarket.StorageDealStatus, actual storagemarket.StorageDealStatus) { + assert.Equal(t, expected, actual, + "Unexpected deal status\nexpected: %s (%d)\nactual : %s (%d)", + storagemarket.DealStates[expected], expected, + storagemarket.DealStates[actual], actual, + ) +} + +func AssertRetrievalDealState(t *testing.T, expected, actual retrievalmarket.DealStatus) { + assert.Equal(t, expected, actual, + "Unexpected retrieval deal status:\nexpected: %s (%d)\nactual : %s (%d)", + retrievalmarket.DealStatuses[expected], expected, + retrievalmarket.DealStatuses[actual], actual, + ) +} + +func GenerateCid(t *testing.T, o interface{}) cid.Cid { + node, err := cborutil.AsIpld(o) + assert.NoError(t, err) + return node.Cid() +} + +func DatastoreAtVersion(t *testing.T, ds datastore.Batching, version versioning.VersionKey) datastore.Batching { + err := ds.Put(context.TODO(), datastore.NewKey("/versions/current"), []byte(version)) + require.NoError(t, err) + return namespace.Wrap(ds, datastore.NewKey(fmt.Sprintf("/%s", version))) +} + 
+func NewIDAddr(t testing.TB, id uint64) address.Address { + ret, err := address.NewIDAddress(id) + require.NoError(t, err) + return ret +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/unixfs/unixfs_testutil.go b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/unixfs/unixfs_testutil.go new file mode 100644 index 00000000000..fcf8c3fdd9f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/shared_testutil/unixfs/unixfs_testutil.go @@ -0,0 +1,69 @@ +package unixfs + +import ( + "context" + "os" + "testing" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-cidutil" + chunk "github.com/ipfs/go-ipfs-chunker" + files "github.com/ipfs/go-ipfs-files" + ipldformat "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipfs/go-unixfs/importer/balanced" + ihelper "github.com/ipfs/go-unixfs/importer/helpers" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" +) + +const ( + defaultHashFunction = uint64(mh.BLAKE2B_MIN + 31) + unixfsChunkSize = uint64(1 << 10) + unixfsLinksPerLevel = 1024 +) + +func WriteUnixfsDAGTo(t *testing.T, path string, into ipldformat.DAGService) cid.Cid { + file, err := os.Open(path) + require.NoError(t, err) + defer file.Close() + + stat, err := file.Stat() + require.NoError(t, err) + + // get a IPLD reader path file + // required to write the Unixfs DAG blocks to a filestore + rpf, err := files.NewReaderPathFile(file.Name(), file, stat) + require.NoError(t, err) + + // generate the dag and get the root + // import to UnixFS + prefix, err := merkledag.PrefixForCidVersion(1) + require.NoError(t, err) + + prefix.MhType = defaultHashFunction + + bufferedDS := ipldformat.NewBufferedDAG(context.Background(), into) + params := ihelper.DagBuilderParams{ + Maxlinks: unixfsLinksPerLevel, + RawLeaves: true, + CidBuilder: cidutil.InlineBuilder{ + Builder: prefix, + Limit: 126, + }, + Dagserv: bufferedDS, + NoCopy: true, + } + + db, err := 
params.New(chunk.NewSizeSplitter(rpf, int64(unixfsChunkSize))) + require.NoError(t, err) + + nd, err := balanced.Layout(db) + require.NoError(t, err) + + err = bufferedDS.Commit() + require.NoError(t, err) + require.NoError(t, rpf.Close()) + + return nd.Cid() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/README.md b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/README.md new file mode 100644 index 00000000000..4c076517948 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/README.md @@ -0,0 +1,378 @@ +# storagemarket +The storagemarket module is intended for Filecoin node implementations written in Go. +It implements functionality to allow execution of storage market deals, and for Providers to set their storage price on the Filecoin network. +The node implementation must provide access to chain operations, and persistent +data storage. + +## Table of Contents +* [Background reading](#background-reading) +* [Installation](#Installation) +* [Operation](#Operation) +* [Implementation](#Implementation) + * [StorageCommon](#StorageCommon) + * [StorageClientNode](#StorageClientNode) + * [StorageProviderNode](#StorageProviderNode) +* [Technical Documentation](#technical-documentation) + +## Background reading + +Please see the +[Filecoin Storage Market Specification](https://filecoin-project.github.io/specs/#systems__filecoin_markets__storage_market). + +## Installation +The build process for storagemarket requires Go >= v1.13. + +To install: +```bash +go get github.com/filecoin-project/go-fil-markets/storagemarket +``` + +## Operation +The `storagemarket` package provides high level APIs to execute data storage deals between a +storage client and a storage provider (a.k.a. storage miner) on the Filecoin network. 
+The Filecoin node must implement the [`StorageCommon`](#StorageCommon), [`StorageProviderNode`](#StorageProviderNode), and +[`StorageClientNode`](#StorageClientNode) interfaces in order to construct and use the module. + +Deals are expected to survive a node restart; deals and related information are + expected to be stored on disk. + +`storagemarket` communicates its deal operations and requested data via + [go-data-transfer](https://github.com/filecoin-project/go-data-transfer) using + [go-graphsync](https://github.com/ipfs/go-graphsync). + +## Implementation + +### General Steps +1. Decide if your node can be configured as a Storage Provider, a Storage Client or both. +1. Determine how and where your retrieval calls to StorageProvider and StorageClient functions + will be made. +1. Implement the required interfaces as described in this section. +1. Construct a [StorageClient](#StorageClient) and/or [StorageProvider](#StorageProvider) in your node's startup. +Call the StorageProvider's `Start` function it in the appropriate place, and its `Stop` +function in the appropriate place. +1. Expose desired `storagemarket` functionality to whatever internal modules desired, such as + command line interface, JSON RPC, or HTTP API. + +Implement the [`StorageCommon`](#StorageCommon), [`StorageProviderNode`](#StorageProviderNode), and + [`StorageClientNode`](#StorageClientNode) interfaces in + [storagemarket/types.go](./types.go), described below: + +### StorageCommon +`StorageCommon` is an interface common to both `StorageProviderNode` and `StorageClientNode`. 
Its + functions are: +* [`GetChainHead`](#GetChainHead) +* [`AddFunds`](#AddFunds) +* [`ReserveFunds`](#ReserveFunds) +* [`ReleaseFunds`](#ReleaseFunds) +* [`GetBalance`](#GetBalance) +* [`VerifySignature`](#VerifySignature) +* [`WaitForMessage`](#WaitForMessage) +* [`SignBytes`](#SignBytes) +* [`GetMinerWorkerAddress`](#GetMinerWorkerAddress) + +#### AddFunds +```go +func AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) +``` + +Send `amount` to `addr` by posting a message on chain. Return the message CID. + +#### ReserveFunds +```go +func ReserveFunds(ctx context.Context, addr, wallet address.Address, amount abi.TokenAmount) (cid.Cid, error) +``` + +Add `amount` to the total reserved funds for `addr`. If total available balance for `addr` in StorageMarketActor is not greater than total reserved, `wallet` should send any needed balance to `addr` by posting a message on chain. Returns the message CID. + +#### ReleaseFunds +```go +func ReleaseFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) +``` + +Release `amount` funds from reserved total for `addr`. No withdrawal is performed for `addr` in the storage market actor but the funds released become +available for future withdrawal +(if new total reserved < total available in SMA) + +#### GetBalance +```go +func GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (Balance, error) +``` +Retrieve the Balance of FIL in `addr`. A `Balance` consists of `Locked` and `Available` `abi.TokenAmount`s + +#### VerifySignature +```go +func VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, + plaintext []byte, tok shared.TipSetToken) (bool, error) +``` +Verify that `signature` is valid, cryptographically and otherwise, for the +given `signer`, `plaintext`, and `tok`. 
+ +#### WaitForMessage +```go +func WaitForMessage(ctx context.Context, mcid cid.Cid, + onCompletion func(exitcode.ExitCode, []byte, error) error) error +``` +Wait for message CID `mcid` to appear on chain, and call `onCompletion` when it does so. + +#### SignBytes +```go +func SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) +``` + +Cryptographically sign bytes `b` using the private key referenced by address `signer`. + +#### GetMinerWorkerAddress +```go +func GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken, + ) (address.Address, error) +``` + +Get the miner worker address for the given miner owner, as of `tok`. + +--- +### StorageProviderNode +`StorageProviderNode` is the interface for dependencies for a `StorageProvider`. It contains: + +* [`StorageCommon`](#StorageCommon) interface +* [`PublishDeals`](#PublishDeals) +* [`ListProviderDeals`](#ListProviderDeals) +* [`OnDealSectorCommitted`](#OnDealSectorCommitted) +* [`LocatePieceForDealWithinSector`](#LocatePieceForDealWithinSector) +* [`OnDealExpiredOrSlashed`](#OnDealExpiredOrSlashed) + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### PublishDeals +```go +func PublishDeals(ctx context.Context, deal MinerDeal) (cid.Cid, error) +``` +Post the deal to chain, returning the posted message CID. + +#### OnDealComplete +```go +func OnDealComplete(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, + pieceReader io.Reader) error +``` +The function to be called when MinerDeal `deal` has reached the `storagemarket.StorageDealCompleted` state. +A `MinerDeal` contains more information than a StorageDeal, including paths, addresses, and CIDs +pertinent to the deal. 
See [storagemarket/types.go](./types.go) + +#### OnDealSectorCommitted +```go +func OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, + cb DealSectorCommittedCallback) error +``` + +Register the function to be called once `provider` has committed sector(s) for `dealID`. + +#### LocatePieceForDealWithinSector +```go +func LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, tok shared.TipSetToken, + ) (sectorID uint64, offset uint64, length uint64, err error) +``` + +Find the piece associated with `dealID` as of `tok` and return the sector id, plus the offset and + length of the data within the sector. + +#### OnDealExpiredOrSlashed +```go +func OnDealExpiredOrSlashed( + ctx context.Context, + dealID abi.DealID, + onDealExpired DealExpiredCallback, + onDealSlashed DealSlashedCallback) error +``` + +Register callbacks to be called when a deal expires or is slashed. + +--- +### StorageClientNode +`StorageClientNode` implements dependencies for a StorageClient. It contains: +* [`StorageCommon`](#StorageCommon) interface +* [`GetChainHead`](#GetChainHead) +* [`ListClientDeals`](#ListClientDeals) +* [`ListStorageProviders`](#ListStorageProviders) +* [`ValidatePublishedDeal`](#ValidatePublishedDeal) +* [`SignProposal`](#SignProposal) +* [`GetDefaultWalletAddress`](#GetDefaultWalletAddress) +* [`OnDealSectorCommitted`](#OnDealSectorCommitted) +* [`OnDealExpiredOrSlashed`](#OnDealExpiredOrSlashed) + +#### StorageCommon +`StorageClientNode` implements `StorageCommon`, described above. + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### ListStorageProviders +```go +func ListStorageProviders(ctx context.Context, tok shared.TipSetToken + ) ([]*StorageProviderInfo, error) +``` + +Return a slice of `StorageProviderInfo`, for all known storage providers. 
+ +#### ValidatePublishedDeal +```go +func ValidatePublishedDeal(ctx context.Context, deal ClientDeal) (abi.DealID, error) +``` +Query the chain for `deal` and inspect the message parameters to make sure they match the expected deal. Return the deal ID. + +#### SignProposal +```go +func SignProposal(ctx context.Context, signer address.Address, proposal market.DealProposal + ) (*market.ClientDealProposal, error) +``` + +Cryptographically sign `proposal` using the private key of `signer` and return a + ClientDealProposal (includes signature data). + +#### GetDefaultWalletAddress +```go +func GetDefaultWalletAddress(ctx context.Context) (address.Address, error) +``` + +Get the Client's default wallet address, which will be used to add Storage Market funds (collateral and payment). + +#### OnDealSectorCommitted +```go +func OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, + cb DealSectorCommittedCallback) error +``` + +Register a callback to be called once the Deal's sector(s) are committed. + +#### OnDealExpiredOrSlashed +```go +func OnDealExpiredOrSlashed( + ctx context.Context, + dealID abi.DealID, + onDealExpired DealExpiredCallback, + onDealSlashed DealSlashedCallback) error +``` + +Register callbacks to be called when a deal expires or is slashed. + +#### GetMinerInfo +```go +func GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken, + ) (*StorageProviderInfo, error) +``` + +Returns `StorageProviderInfo` for a specific provider at the given address + + +## Construction + +### StorageClient +To construct a new StorageClient: +```go +func NewClient( + net network.StorageMarketNetwork, + bs blockstore.Blockstore, + dataTransfer datatransfer.Manager, + discovery *discovery.Local, + ds datastore.Batching, + scn storagemarket.StorageClientNode, +) (*Client, error) +``` +**Parameters** + +* `net network.StorageMarketNetwork` is a network abstraction for the storage market. 
To create it, use: + ```go + package network + func NewFromLibp2pHost(h host.Host) StorageMarketNetwork + ``` +* `bs blockstore.Blockstore` is an IPFS blockstore for storing and retrieving data for deals. + See [github.com/ipfs/go-ipfs-blockstore](github.com/ipfs/go-ipfs-blockstore). +* `dataTransfer datatransfer.Manager` is an interface from [github.com/filecoin-project/go-data-transfer](https://github.com/filecoin-project/go-data-transfer) + There is more than one implementation, but one way to create a new datatransfer.Manager is: + ```go + package graphsyncimpl + + func NewGraphSyncDataTransfer(host host.Host, gs graphsync.GraphExchange, storedCounter *storedcounter.StoredCounter) datatransfer.Manager + ``` + Also: + ```go + package datatransfer + + // NewDAGServiceDataTransfer returns a data transfer manager based on + // an IPLD DAGService + func NewDAGServiceDataTransfer(dag ipldformat.DAGService) datatransfer.Manager + ``` + + Please see the [go-data-transfer repo](https://github.com/filecoin-project/go-data-transfer) for more information. + +* `discovery *discovery.Local` implements the `PeerResolver` interface. To initialize a new discovery.Local: + ```go + func NewLocal(ds datastore.Batching) *Local + ``` +* `ds datastore.Batching` is a datastore for the deal's state machine. It is + typically the node's own datastore that implements the IPFS datastore.Batching interface. + See + [github.com/ipfs/go-datastore](https://github.com/ipfs/go-datastore). + +* `scn storagemarket.StorageClientNode` is the implementation of the [`StorageClientNode`](#StorageClientNode) API +that was written for your node. 
+ +### StorageProvider +To construct a new StorageProvider: +```go +func NewProvider(net network.StorageMarketNetwork, + ds datastore.Batching, + bs blockstore.Blockstore, + fs filestore.FileStore, + pieceStore piecestore.PieceStore, + dataTransfer datatransfer.Manager, + spn storagemarket.StorageProviderNode, + minerAddress address.Address, + rt abi.RegisteredProof, + storedAsk StoredAsk, + options ...StorageProviderOption, +) (storagemarket.StorageProvider, error) { +``` + +**Parameters** +* `net network.StorageMarketNetwork` is the same interface as for [StorageClientNode](#StorageClientNode) +* `ds datastore.Batching` is the same interface as for [StorageClientNode](#StorageClientNode) +* `bs blockstore.Blockstore` is the same interface as for [StorageClientNode](#StorageClientNode) +* `fs filestore.FileStore` is an instance of the [filestore.FileStore](../filestore) struct from the + go-fil-markets repo. +* `pieceStore piecestore.PieceStore` is the database of deals and pieces associated with them. +See this repo's [piecestore module](../piecestore). +* `dataTransfer` is the same interface as for [StorageClientNode](#StorageClientNode) +* `spn storagemarket.StorageProviderNode` is the implementation of the [`StorageProviderNode`](#StorageProviderNode) API + that was written for your node. +* `minerAddress address.Address` is the miner owner address. +* `rt abi.RegisteredProof` is an int64 indicating the type of proof to use when generating a piece commitment (CommP). + see [github.com/filecoin-project/go-state-types/abi/sector.go](https://github.com/filecoin-project/specs-actors/blob/master/actors/abi/sector.go) + for the list and meaning of accepted values. +* `storedAsk StoredAsk` is an interface for getting and adding storage Asks. It is implemented in storagemarket. 
+ To create a `StoredAsk`: + ```go + package storedask + func NewStoredAsk(ds datastore.Batching, dsKey datastore.Key, spn storagemarket.StorageProviderNode, + actor address.Address) (*StoredAsk, error) + ``` +* `options ...StorageProviderOption` options is a variable length parameter to provide functions that change the + StorageProvider default configuration. See [provider.go](./impl/provider.go) for the available options. + +## Technical Documentation + +* [GoDoc](https://godoc.org/github.com/filecoin-project/go-fil-markets/storagemarket) contains an architectural overview and robust API documentation + +* Storage Client FSM diagram: + +[![Diagram of StorageClientFSM](../docs/storageclient.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageclient.mmd.svg) + + +* Storage Provider FSM diagram: + +[![Diagram of StorageClientFSM](../docs/storageprovider.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageprovider.mmd.svg) diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/client.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/client.go new file mode 100644 index 00000000000..fd86ed0c3fb --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/client.go @@ -0,0 +1,71 @@ +package storagemarket + +import ( + "context" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +type PayloadCID = cid.Cid + +// BlockstoreAccessor is used by the storage market client to get a +// blockstore when needed, concretely to send the payload to the provider. +// This abstraction allows the caller to provider any blockstore implementation: +// a CARv2 file, an IPFS blockstore, or something else. 
+// +// The key is a payload CID because this is the unique top-level key of a +// client-side data import. +type BlockstoreAccessor interface { + Get(PayloadCID) (bstore.Blockstore, error) + Done(PayloadCID) error +} + +// ClientSubscriber is a callback that is run when events are emitted on a StorageClient +type ClientSubscriber func(event ClientEvent, deal ClientDeal) + +// StorageClient is a client interface for making storage deals with a StorageProvider +type StorageClient interface { + + // Start initializes deal processing on a StorageClient and restarts + // in progress deals + Start(ctx context.Context) error + + // OnReady registers a listener for when the client comes on line + OnReady(shared.ReadyFunc) + + // Stop ends deal processing on a StorageClient + Stop() error + + // ListProviders queries chain state and returns active storage providers + ListProviders(ctx context.Context) (<-chan StorageProviderInfo, error) + + // ListLocalDeals lists deals initiated by this storage client + ListLocalDeals(ctx context.Context) ([]ClientDeal, error) + + // GetLocalDeal lists deals that are in progress or rejected + GetLocalDeal(ctx context.Context, cid cid.Cid) (ClientDeal, error) + + // GetAsk returns the current ask for a storage provider + GetAsk(ctx context.Context, info StorageProviderInfo) (*StorageAsk, error) + + // GetProviderDealState queries a provider for the current state of a client's deal + GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*ProviderDealState, error) + + // ProposeStorageDeal initiates deal negotiation with a Storage Provider + ProposeStorageDeal(ctx context.Context, params ProposeStorageDealParams) (*ProposeStorageDealResult, error) + + // GetPaymentEscrow returns the current funds available for deal payment + GetPaymentEscrow(ctx context.Context, addr address.Address) (Balance, error) + + // AddPaymentEscrow adds storage collateral + AddPaymentEscrow(ctx context.Context, addr address.Address, amount 
abi.TokenAmount) error + + // SubscribeToEvents listens for events that happen related to storage deals on a provider + SubscribeToEvents(subscriber ClientSubscriber) shared.Unsubscribe +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/dealstatus.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/dealstatus.go new file mode 100644 index 00000000000..906d624ae07 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/dealstatus.go @@ -0,0 +1,232 @@ +package storagemarket + +// StorageDealStatus is the local status of a StorageDeal. +// Note: this status has meaning in the context of this module only - it is not +// recorded on chain +type StorageDealStatus = uint64 + +const ( + // StorageDealUnknown means the current status of a deal is undefined + StorageDealUnknown = StorageDealStatus(iota) + + // StorageDealProposalNotFound is a status returned in responses when the deal itself cannot + // be located + StorageDealProposalNotFound + + // StorageDealProposalRejected is returned by a StorageProvider when it chooses not to accept + // a DealProposal + StorageDealProposalRejected + + // StorageDealProposalAccepted indicates an intent to accept a storage deal proposal + StorageDealProposalAccepted + + // StorageDealStaged means a deal has been published and data is ready to be put into a sector + StorageDealStaged + + // StorageDealSealing means a deal is in a sector that is being sealed + StorageDealSealing + + // StorageDealFinalizing means a deal is in a sealed sector and we're doing final + // housekeeping before marking it active + StorageDealFinalizing + + // StorageDealActive means a deal is in a sealed sector and the miner is proving the data + // for the deal + StorageDealActive + + // StorageDealExpired means a deal has passed its final epoch and is expired + StorageDealExpired + + // StorageDealSlashed means the deal was in a sector that got slashed from failing to prove + StorageDealSlashed + + // StorageDealRejecting 
means the Provider has rejected the deal, and will send a rejection response + StorageDealRejecting + + // StorageDealFailing means something has gone wrong in a deal. Once data is cleaned up the deal will finalize on + // StorageDealError + StorageDealFailing + + // StorageDealFundsReserved means we've deposited funds as necessary to create a deal, ready to move forward + StorageDealFundsReserved + + // StorageDealCheckForAcceptance means the client is waiting for a provider to seal and publish a deal + StorageDealCheckForAcceptance + + // StorageDealValidating means the provider is validating that deal parameters are good for a proposal + StorageDealValidating + + // StorageDealAcceptWait means the provider is running any custom decision logic to decide whether or not to accept the deal + StorageDealAcceptWait + + // StorageDealStartDataTransfer means data transfer is beginning + StorageDealStartDataTransfer + + // StorageDealTransferring means data is being sent from the client to the provider via the data transfer module + StorageDealTransferring + + // StorageDealWaitingForData indicates either a manual transfer + // or that the provider has not received a data transfer request from the client + StorageDealWaitingForData + + // StorageDealVerifyData means data has been transferred and we are attempting to verify it against the PieceCID + StorageDealVerifyData + + // StorageDealReserveProviderFunds means that provider is making sure it has adequate funds for the deal in the StorageMarketActor + StorageDealReserveProviderFunds + + // StorageDealReserveClientFunds means that client is making sure it has adequate funds for the deal in the StorageMarketActor + StorageDealReserveClientFunds + + // StorageDealProviderFunding means that the provider has deposited funds in the StorageMarketActor and it is waiting + // to see the funds appear in its balance + StorageDealProviderFunding + + // StorageDealClientFunding means that the client has deposited funds in the 
StorageMarketActor and it is waiting + // to see the funds appear in its balance + StorageDealClientFunding + + // StorageDealPublish means the deal is ready to be published on chain + StorageDealPublish + + // StorageDealPublishing means the deal has been published but we are waiting for it to appear on chain + StorageDealPublishing + + // StorageDealError means the deal has failed due to an error, and no further updates will occur + StorageDealError + + // StorageDealProviderTransferAwaitRestart means the provider has restarted while data + // was being transferred from client to provider, and will wait for the client to + // resume the transfer + StorageDealProviderTransferAwaitRestart + + // StorageDealClientTransferRestart means a storage deal data transfer from client to provider will be restarted + // by the client + StorageDealClientTransferRestart + + // StorageDealAwaitingPreCommit means a deal is ready and must be pre-committed + StorageDealAwaitingPreCommit + + // StorageDealTransferQueued means the data transfer request has been queued and will be executed soon. 
+ StorageDealTransferQueued + + StorageDealReserveProviderFundsOfSxx + + StorageDealPublishOfSxx + + StorageDealPublishingOfSxx + + StorageDealStagedOfSxx + +) + +// DealStates maps StorageDealStatus codes to string names +var DealStates = map[StorageDealStatus]string{ + StorageDealUnknown: "StorageDealUnknown", + StorageDealProposalNotFound: "StorageDealProposalNotFound", + StorageDealProposalRejected: "StorageDealProposalRejected", + StorageDealProposalAccepted: "StorageDealProposalAccepted", + StorageDealAcceptWait: "StorageDealAcceptWait", + StorageDealStartDataTransfer: "StorageDealStartDataTransfer", + StorageDealStaged: "StorageDealStaged", + StorageDealStagedOfSxx: "StorageDealStagedOfSxx", + StorageDealAwaitingPreCommit: "StorageDealAwaitingPreCommit", + StorageDealSealing: "StorageDealSealing", + StorageDealActive: "StorageDealActive", + StorageDealExpired: "StorageDealExpired", + StorageDealSlashed: "StorageDealSlashed", + StorageDealRejecting: "StorageDealRejecting", + StorageDealFailing: "StorageDealFailing", + StorageDealFundsReserved: "StorageDealFundsReserved", + StorageDealCheckForAcceptance: "StorageDealCheckForAcceptance", + StorageDealValidating: "StorageDealValidating", + StorageDealTransferring: "StorageDealTransferring", + StorageDealWaitingForData: "StorageDealWaitingForData", + StorageDealVerifyData: "StorageDealVerifyData", + StorageDealReserveProviderFunds: "StorageDealReserveProviderFunds", + StorageDealReserveProviderFundsOfSxx: "StorageDealReserveProviderFundsOfSxx", + StorageDealReserveClientFunds: "StorageDealReserveClientFunds", + StorageDealProviderFunding: "StorageDealProviderFunding", + StorageDealClientFunding: "StorageDealClientFunding", + StorageDealPublish: "StorageDealPublish", + StorageDealPublishOfSxx: "StorageDealPublishOfSxx", + StorageDealPublishing: "StorageDealPublishing", + StorageDealPublishingOfSxx: "StorageDealPublishingOfSxx", + StorageDealError: "StorageDealError", + StorageDealFinalizing: 
"StorageDealFinalizing", + StorageDealClientTransferRestart: "StorageDealClientTransferRestart", + StorageDealProviderTransferAwaitRestart: "StorageDealProviderTransferAwaitRestart", + StorageDealTransferQueued: "StorageDealTransferQueued", +} + +// DealStatesDescriptions maps StorageDealStatus codes to string description for better UX +var DealStatesDescriptions = map[StorageDealStatus]string{ + StorageDealUnknown: "Unknown", + StorageDealProposalNotFound: "Proposal not found", + StorageDealProposalRejected: "Proposal rejected", + StorageDealProposalAccepted: "Proposal accepted", + StorageDealAcceptWait: "AcceptWait", + StorageDealStartDataTransfer: "Starting data transfer", + StorageDealStaged: "Staged", + StorageDealStagedOfSxx: "Staged", + StorageDealAwaitingPreCommit: "Awaiting a PreCommit message on chain", + StorageDealSealing: "Sealing", + StorageDealActive: "Active", + StorageDealExpired: "Expired", + StorageDealSlashed: "Slashed", + StorageDealRejecting: "Rejecting", + StorageDealFailing: "Failing", + StorageDealFundsReserved: "FundsReserved", + StorageDealCheckForAcceptance: "Checking for deal acceptance", + StorageDealValidating: "Validating", + StorageDealTransferring: "Transferring", + StorageDealWaitingForData: "Waiting for data", + StorageDealVerifyData: "Verifying data", + StorageDealReserveProviderFunds: "Reserving provider funds", + StorageDealReserveProviderFundsOfSxx: "Reserving provider funds", + StorageDealReserveClientFunds: "Reserving client funds", + StorageDealProviderFunding: "Provider funding", + StorageDealClientFunding: "Client funding", + StorageDealPublish: "Publish", + StorageDealPublishOfSxx: "Publish", + StorageDealPublishing: "Publishing", + StorageDealPublishingOfSxx: "Publishing", + StorageDealError: "Error", + StorageDealFinalizing: "Finalizing", + StorageDealClientTransferRestart: "Client transfer restart", + StorageDealProviderTransferAwaitRestart: "ProviderTransferAwaitRestart", +} + +var DealStatesDurations = 
map[StorageDealStatus]string{ + StorageDealUnknown: "", + StorageDealProposalNotFound: "", + StorageDealProposalRejected: "", + StorageDealProposalAccepted: "a few minutes", + StorageDealAcceptWait: "a few minutes", + StorageDealStartDataTransfer: "a few minutes", + StorageDealStaged: "a few minutes", + StorageDealStagedOfSxx: "a few minutes", + StorageDealAwaitingPreCommit: "a few minutes", + StorageDealSealing: "a few hours", + StorageDealActive: "", + StorageDealExpired: "", + StorageDealSlashed: "", + StorageDealRejecting: "", + StorageDealFailing: "", + StorageDealFundsReserved: "a few minutes", + StorageDealCheckForAcceptance: "a few minutes", + StorageDealValidating: "a few minutes", + StorageDealTransferring: "a few minutes", + StorageDealWaitingForData: "a few minutes", + StorageDealVerifyData: "a few minutes", + StorageDealReserveProviderFunds: "a few minutes", + StorageDealReserveClientFunds: "a few minutes", + StorageDealProviderFunding: "a few minutes", + StorageDealClientFunding: "a few minutes", + StorageDealPublish: "a few minutes", + StorageDealPublishing: "a few minutes", + StorageDealError: "", + StorageDealFinalizing: "a few minutes", + StorageDealClientTransferRestart: "depending on data size, anywhere between a few minutes to a few hours", + StorageDealProviderTransferAwaitRestart: "a few minutes", +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/doc.go new file mode 100644 index 00000000000..7d912bbbc68 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/doc.go @@ -0,0 +1,124 @@ +/* +Package storagemarket implements the Filecoin storage protocol. 
+ +An overview of the storage protocol can be found in the Filecoin specification: + +https://filecoin-project.github.io/specs/#systems__filecoin_markets__storage_market + +The following architectural components provide a brief overview of the design of +the storagemarket module: + +Public Interfaces And Node Dependencies + +A core goal of this module is to isolate the negotiation of deals from the actual chain operations +performed by the node to put the deal on chain. The module primarily orchestrates the storage deal +flow, rather than performing specific chain operations which are delegated to the node. + +As such, for both the client and the provider in the storage market, the module defines a top level +public interface which it provides an implementation for, and a node interface that must be implemented +by the Filecoin node itself, and provided as a dependency. These node interfaces provide a universal way to +talk to potentially multiple different Filecoin node implementations, and can be implemented using HTTP +or some other interprocess communication to talk to a node implementation running in a different process. + +The top level interfaces this package implements are StorageClient & StorageProvider. The dependencies the Filecoin +node is expected to implement are StorageClientNode & StorageProviderNode. Further documentation of exactly what those +dependencies should do can be found in the readme. + +Finite State Machines and Resumability + +Making deals in Filecoin is a highly asynchronous process. For a large piece of data, it's possible that the entire +process of proposing a deal, transferring data, publishing the deal, putting the data in a sector and sealing it +could take hours or even days. Not surprisingly, many things can go wrong along the way. To manage the process +of orchestrating deals, we use finite state machines that update deal state when discrete events occur. State updates +always persist state to disk. 
This means we have a permanent record of exactly what's going on with deals at any time, +and we can ideally survive our Filecoin processes shutting down and restarting. + +The following diagrams visualize the statemachine flows for the client and the provider: + +Client FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageclient.mmd.svg + +Provider FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageprovider.mmd.svg + +Identifying Providers For A Deal + +The StorageClient provides two functions to locate a provider with whom to make a deal: + +`ListProviders` returns a list of storage providers on the Filecoin network. This list is assembled by +querying the chain state for active storage miners. + +`QueryAsk` queries a single provider for more specific details about the kinds of deals they accept, as +expressed through a `StorageAsk`. + +Deal Flow + +The primary mechanism for initiating storage deals is the `ProposeStorageDeal` method on the StorageClient. + +When `ProposeStorageDeal` is called, it constructs and signs a DealProposal, initiates tracking of deal state +and hands the deal to the Client FSM, returning the CID of the DealProposal which constitutes the identifier for +that deal. + +After some preparation steps, the FSM will send the deal proposal to the StorageProvider, which receives the deal +in `HandleDealStream`. `HandleDealStream` initiates tracking of deal state on the Provider side and hands the deal to +the Provider FSM, which handles the rest of deal flow. + +From this point forward, deal negotiation is completely asynchronous and runs in the FSMs. + +A user of the modules can monitor deal progress through `SubscribeToEvents` methods on StorageClient and StorageProvider, +or by simply calling `ListLocalDeals` to get all deal statuses. + +The FSMs implement every step in deal negotiation up to deal publishing. 
However, adding the deal to a sector and sealing +it is handled outside this module. When a deal is published, the StorageProvider calls `OnDealComplete` on the StorageProviderNode +interface (the node itself likely delegates management of sectors and sealing to an implementation of the Storage Mining subsystem +of the Filecoin spec). At this point, the markets implementations essentially shift to being monitors of deal progression: +they wait to see and record when the deal becomes active and later expired or slashed. + +When a deal becomes active on chain, the provider records the location of where it's stored in a sector in the PieceStore, +so that it's available for retrieval. + +Major Dependencies + +Other libraries in go-fil-markets: + +https://github.com/filecoin-project/go-fil-markets/tree/master/filestore - used to store pieces and other +temporary data before it's transferred to either a sector or the PieceStore. + +https://github.com/filecoin-project/go-fil-markets/tree/master/pieceio - used to convert back and forth between raw +payload data and pieces that fit in sector. Also provides utilities for generating CommP. + +https://github.com/filecoin-project/go-fil-markets/tree/master/piecestore - used to write information about where data +lives in sectors so that it can later be retrieved. + +https://github.com/filecoin-project/go-fil-markets/tree/master/shared - types and utility functions shared with +retrievalmarket package. 
+ +Other Filecoin Repos: + +https://github.com/filecoin-project/go-data-transfer - for transferring data, via go-graphsync + +https://github.com/filecoin-project/go-statemachine - a finite state machine that tracks deal state + +https://github.com/filecoin-project/go-storedcounter - for generating and persisting unique deal IDs + +https://github.com/filecoin-project/specs-actors - the Filecoin actors + +IPFS Project Repos: + +https://github.com/ipfs/go-graphsync - used by go-data-transfer + +https://github.com/ipfs/go-datastore - for persisting statemachine state for deals + +https://github.com/ipfs/go-ipfs-blockstore - for storing and retrieving block data for deals + +Other Repos: + +https://github.com/libp2p/go-libp2p - the network over which retrieval deal data is exchanged. + +https://github.com/hannahhoward/go-pubsub - for pub/sub notifications external to the statemachine + +Root Package + +This top level package defines top level enumerations and interfaces. The primary implementation +lives in the `impl` directory + +*/ +package storagemarket diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/events.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/events.go new file mode 100644 index 00000000000..5facc061bc1 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/events.go @@ -0,0 +1,362 @@ +package storagemarket + +import "fmt" + +// ClientEvent is an event that happens in the client's deal state machine +type ClientEvent uint64 + +const ( + // ClientEventOpen indicates a new deal was started + ClientEventOpen ClientEvent = iota + + // ClientEventReserveFundsFailed happens when attempting to reserve funds for a deal fails + ClientEventReserveFundsFailed + + // ClientEventFundingInitiated happens when a client has sent a message adding funds to its balance + ClientEventFundingInitiated + + // ClientEventFundsReserved happens when a client reserves funds for a deal (updating our tracked funds) + ClientEventFundsReserved 
+ + // ClientEventFundsReleased happens when a client released funds for a deal (updating our tracked funds) + ClientEventFundsReleased + + // ClientEventFundingComplete happens when a client successfully reserves funds for a deal + ClientEventFundingComplete + + // ClientEventWriteProposalFailed indicates an attempt to send a deal proposal to a provider failed + ClientEventWriteProposalFailed + + // ClientEventInitiateDataTransfer happens when a client is ready to transfer data to a provider + ClientEventInitiateDataTransfer + + // ClientEventDataTransferInitiated happens when piece data transfer has started + ClientEventDataTransferInitiated + + // ClientEventDataTransferRestarted happens when a data transfer from client to provider is restarted by the client + ClientEventDataTransferRestarted + + // ClientEventDataTransferComplete happens when piece data transfer has been completed + ClientEventDataTransferComplete + + // ClientEventWaitForDealState happens when the client needs to continue waiting for an actionable deal state + ClientEventWaitForDealState + + // ClientEventDataTransferFailed happens when the client can't initiate a push data transfer to the provider + ClientEventDataTransferFailed + + // ClientEventDataTransferRestartFailed happens when the client can't restart an existing data transfer + ClientEventDataTransferRestartFailed + + // ClientEventReadResponseFailed means a network error occurred reading a deal response + ClientEventReadResponseFailed + + // ClientEventResponseVerificationFailed means a response was not verified + ClientEventResponseVerificationFailed + + // ClientEventResponseDealDidNotMatch means a response was sent for the wrong deal + ClientEventResponseDealDidNotMatch + + // ClientEventUnexpectedDealState means a response was sent but the state wasn't what we expected + ClientEventUnexpectedDealState + + // ClientEventStreamCloseError happens when an attempt to close a deals stream fails + ClientEventStreamCloseError + + // 
ClientEventDealRejected happens when the provider does not accept a deal + ClientEventDealRejected + + // ClientEventDealAccepted happens when a client receives a response accepting a deal from a provider + ClientEventDealAccepted + + // ClientEventDealPublishFailed happens when a client cannot verify a deal was published + ClientEventDealPublishFailed + + // ClientEventDealPublished happens when a deal is successfully published + ClientEventDealPublished + + // ClientEventDealPrecommitFailed happens when an error occurs waiting for deal pre-commit + ClientEventDealPrecommitFailed + + // ClientEventDealPrecommitted happens when a deal is successfully pre-committed + ClientEventDealPrecommitted + + // ClientEventDealActivationFailed happens when a client cannot verify a deal was activated + ClientEventDealActivationFailed + + // ClientEventDealActivated happens when a deal is successfully activated + ClientEventDealActivated + + // ClientEventDealCompletionFailed happens when a client cannot verify a deal expired or was slashed + ClientEventDealCompletionFailed + + // ClientEventDealExpired happens when a deal expires + ClientEventDealExpired + + // ClientEventDealSlashed happens when a deal is slashed + ClientEventDealSlashed + + // ClientEventFailed happens when a deal terminates in failure + ClientEventFailed + + // ClientEventRestart is used to resume the deal after a state machine shutdown + ClientEventRestart + + // ClientEventDataTransferStalled happens when the client's data transfer experiences a disconnect + ClientEventDataTransferStalled + + // ClientEventDataTransferCancelled happens when a data transfer is cancelled + ClientEventDataTransferCancelled + + // ClientEventDataTransferQueued happens when we queue the provider's request to transfer data to it + // in response to the push request we send to the provider. 
+ ClientEventDataTransferQueued +) + +// ClientEvents maps client event codes to string names +var ClientEvents = map[ClientEvent]string{ + ClientEventOpen: "ClientEventOpen", + ClientEventReserveFundsFailed: "ClientEventReserveFundsFailed", + ClientEventFundingInitiated: "ClientEventFundingInitiated", + ClientEventFundsReserved: "ClientEventFundsReserved", + ClientEventFundsReleased: "ClientEventFundsReleased", + ClientEventFundingComplete: "ClientEventFundingComplete", + ClientEventWriteProposalFailed: "ClientEventWriteProposalFailed", + ClientEventInitiateDataTransfer: "ClientEventInitiateDataTransfer", + ClientEventDataTransferInitiated: "ClientEventDataTransferInitiated", + ClientEventDataTransferComplete: "ClientEventDataTransferComplete", + ClientEventWaitForDealState: "ClientEventWaitForDealState", + ClientEventDataTransferFailed: "ClientEventDataTransferFailed", + ClientEventReadResponseFailed: "ClientEventReadResponseFailed", + ClientEventResponseVerificationFailed: "ClientEventResponseVerificationFailed", + ClientEventResponseDealDidNotMatch: "ClientEventResponseDealDidNotMatch", + ClientEventUnexpectedDealState: "ClientEventUnexpectedDealState", + ClientEventStreamCloseError: "ClientEventStreamCloseError", + ClientEventDealRejected: "ClientEventDealRejected", + ClientEventDealAccepted: "ClientEventDealAccepted", + ClientEventDealPublishFailed: "ClientEventDealPublishFailed", + ClientEventDealPublished: "ClientEventDealPublished", + ClientEventDealActivationFailed: "ClientEventDealActivationFailed", + ClientEventDealActivated: "ClientEventDealActivated", + ClientEventDealCompletionFailed: "ClientEventDealCompletionFailed", + ClientEventDealExpired: "ClientEventDealExpired", + ClientEventDealSlashed: "ClientEventDealSlashed", + ClientEventFailed: "ClientEventFailed", + ClientEventRestart: "ClientEventRestart", + ClientEventDataTransferRestarted: "ClientEventDataTransferRestarted", + ClientEventDataTransferRestartFailed: 
"ClientEventDataTransferRestartFailed", + ClientEventDataTransferStalled: "ClientEventDataTransferStalled", + ClientEventDataTransferCancelled: "ClientEventDataTransferCancelled", + ClientEventDataTransferQueued: "ClientEventDataTransferQueued", +} + +func (e ClientEvent) String() string { + str, ok := ClientEvents[e] + if ok { + return str + } + return fmt.Sprintf("ClientEventUnknown - %d", e) +} + +// ProviderEvent is an event that happens in the provider's deal state machine +type ProviderEvent uint64 + +const ( + // ProviderEventOpen indicates a new deal proposal has been received + ProviderEventOpen ProviderEvent = iota + + // ProviderEventNodeErrored indicates an error happened talking to the node implementation + ProviderEventNodeErrored + + // ProviderEventDealDeciding happens when a deal is being decided on by the miner + ProviderEventDealDeciding + + // ProviderEventDealRejected happens when a deal proposal is rejected for not meeting criteria + ProviderEventDealRejected + + // ProviderEventRejectionSent happens after a deal proposal rejection has been sent to the client + ProviderEventRejectionSent + + // ProviderEventDealAccepted happens when a deal is accepted based on provider criteria + ProviderEventDealAccepted + + // ProviderEventInsufficientFunds indicates not enough funds available for a deal + ProviderEventInsufficientFunds + + // ProviderEventFundsReserved indicates we've reserved funds for a deal, adding to our overall total + ProviderEventFundsReserved + + // ProviderEventFundsReleased indicates we've released funds for a deal + ProviderEventFundsReleased + + // ProviderEventFundingInitiated indicates provider collateral funding has been initiated + ProviderEventFundingInitiated + + // ProviderEventFunded indicates provider collateral has appeared in the storage market balance + ProviderEventFunded + + // ProviderEventDataTransferFailed happens when an error occurs transferring data + ProviderEventDataTransferFailed + + // 
ProviderEventDataRequested happens when a provider requests data from a client + ProviderEventDataRequested + + // ProviderEventDataTransferInitiated happens when a data transfer starts + ProviderEventDataTransferInitiated + + // ProviderEventDataTransferRestarted happens when a data transfer restarts + ProviderEventDataTransferRestarted + + // ProviderEventDataTransferCompleted happens when a data transfer is successful + ProviderEventDataTransferCompleted + + // ProviderEventManualDataReceived happens when data is received manually for an offline deal + ProviderEventManualDataReceived + + // ProviderEventDataVerificationFailed happens when an error occurs validating deal data + ProviderEventDataVerificationFailed + + // ProviderEventVerifiedData happens when received data is verified as matching the pieceCID in a deal proposal + ProviderEventVerifiedData + + // ProviderEventSendResponseFailed happens when a response cannot be sent to a deal + ProviderEventSendResponseFailed + + // ProviderEventDealPublishInitiated happens when a provider has sent a PublishStorageDeals message to the chain + ProviderEventDealPublishInitiated + + // ProviderEventDealPublished happens when a deal is successfully published + ProviderEventDealPublished + + // ProviderEventDealPublishError happens when PublishStorageDeals returns a non-ok exit code + ProviderEventDealPublishError + + // ProviderEventFileStoreErrored happens when an error occurs accessing the filestore + ProviderEventFileStoreErrored + + // ProviderEventDealHandoffFailed happens when an error occurs handing off a deal with OnDealComplete + ProviderEventDealHandoffFailed + + // ProviderEventDealHandedOff happens when a deal is successfully handed off to the node for processing in a sector + ProviderEventDealHandedOff + + // ProviderEventDealPrecommitFailed happens when an error occurs waiting for deal pre-commit + ProviderEventDealPrecommitFailed + + // ProviderEventDealPrecommitted happens when a deal is successfully 
pre-committed + ProviderEventDealPrecommitted + + // ProviderEventDealActivationFailed happens when an error occurs activating a deal + ProviderEventDealActivationFailed + + // ProviderEventDealActivated happens when a deal is successfully activated and committed to a sector + ProviderEventDealActivated + + // ProviderEventPieceStoreErrored happens when an attempt to save data in the piece store errors + ProviderEventPieceStoreErrored + + // ProviderEventFinalized happens when final housekeeping is complete and a deal is active + ProviderEventFinalized + + // ProviderEventDealCompletionFailed happens when a miner cannot verify a deal expired or was slashed + ProviderEventDealCompletionFailed + + // ProviderEventMultistoreErrored indicates an error happened with a store for a deal + ProviderEventMultistoreErrored + + // ProviderEventDealExpired happens when a deal expires + ProviderEventDealExpired + + // ProviderEventDealSlashed happens when a deal is slashed + ProviderEventDealSlashed + + // ProviderEventFailed indicates a deal has failed and should no longer be processed + ProviderEventFailed + + // ProviderEventTrackFundsFailed indicates a failure trying to locally track funds needed for deals + ProviderEventTrackFundsFailed + + // ProviderEventRestart is used to resume the deal after a state machine shutdown + ProviderEventRestart + + // ProviderEventDataTransferRestartFailed means a data transfer that was restarted by the provider failed + // Deprecated: this event is no longer used + ProviderEventDataTransferRestartFailed + + // ProviderEventDataTransferStalled happens when the provider's data transfer experiences a disconnect + ProviderEventDataTransferStalled + + // ProviderEventDataTransferCancelled happens when a data transfer is cancelled + ProviderEventDataTransferCancelled + + // ProviderEventAwaitTransferRestartTimeout is dispatched after a certain amount of time a provider has been + // waiting for a data transfer to restart. 
If transfer hasn't restarted, the provider will fail the deal + ProviderEventAwaitTransferRestartTimeout + + ProviderEventVerifiedDataOfSxx + + ProviderEventFundedOfSxx + + ProviderEventDealPublishInitiatedOfSxx + + ProviderEventDealPublishedOfSxx +) + +// ProviderEvents maps provider event codes to string names +var ProviderEvents = map[ProviderEvent]string{ + ProviderEventOpen: "ProviderEventOpen", + ProviderEventNodeErrored: "ProviderEventNodeErrored", + ProviderEventDealRejected: "ProviderEventDealRejected", + ProviderEventRejectionSent: "ProviderEventRejectionSent", + ProviderEventDealAccepted: "ProviderEventDealAccepted", + ProviderEventDealDeciding: "ProviderEventDealDeciding", + ProviderEventInsufficientFunds: "ProviderEventInsufficientFunds", + ProviderEventFundsReserved: "ProviderEventFundsReserved", + ProviderEventFundsReleased: "ProviderEventFundsReleased", + ProviderEventFundingInitiated: "ProviderEventFundingInitiated", + ProviderEventFunded: "ProviderEventFunded", + ProviderEventFundedOfSxx: "ProviderEventFundedOfSxx", + ProviderEventDataTransferFailed: "ProviderEventDataTransferFailed", + ProviderEventDataRequested: "ProviderEventDataRequested", + ProviderEventDataTransferInitiated: "ProviderEventDataTransferInitiated", + ProviderEventDataTransferCompleted: "ProviderEventDataTransferCompleted", + ProviderEventManualDataReceived: "ProviderEventManualDataReceived", + ProviderEventDataVerificationFailed: "ProviderEventDataVerificationFailed", + ProviderEventVerifiedData: "ProviderEventVerifiedData", + ProviderEventVerifiedDataOfSxx: "ProviderEventVerifiedDataOfSxx", + ProviderEventSendResponseFailed: "ProviderEventSendResponseFailed", + ProviderEventDealPublishInitiated: "ProviderEventDealPublishInitiated", + ProviderEventDealPublishInitiatedOfSxx: "ProviderEventDealPublishInitiatedOfSxx", + ProviderEventDealPublished: "ProviderEventDealPublished", + ProviderEventDealPublishedOfSxx: "ProviderEventDealPublishedOfSxx", + ProviderEventDealPublishError: 
"ProviderEventDealPublishError", + ProviderEventFileStoreErrored: "ProviderEventFileStoreErrored", + ProviderEventDealHandoffFailed: "ProviderEventDealHandoffFailed", + ProviderEventDealHandedOff: "ProviderEventDealHandedOff", + ProviderEventDealActivationFailed: "ProviderEventDealActivationFailed", + ProviderEventDealActivated: "ProviderEventDealActivated", + ProviderEventPieceStoreErrored: "ProviderEventPieceStoreErrored", + ProviderEventFinalized: "ProviderEventCleanupFinished", + ProviderEventDealCompletionFailed: "ProviderEventDealCompletionFailed", + ProviderEventMultistoreErrored: "ProviderEventMultistoreErrored", + ProviderEventDealExpired: "ProviderEventDealExpired", + ProviderEventDealSlashed: "ProviderEventDealSlashed", + ProviderEventFailed: "ProviderEventFailed", + ProviderEventTrackFundsFailed: "ProviderEventTrackFundsFailed", + ProviderEventRestart: "ProviderEventRestart", + ProviderEventDataTransferRestarted: "ProviderEventDataTransferRestarted", + ProviderEventDataTransferRestartFailed: "ProviderEventDataTransferRestartFailed", + ProviderEventDataTransferStalled: "ProviderEventDataTransferStalled", + ProviderEventDataTransferCancelled: "ProviderEventDataTransferCancelled", + ProviderEventDealPrecommitFailed: "ProviderEventDealPrecommitFailed", + ProviderEventDealPrecommitted: "ProviderEventDealPrecommitted", + ProviderEventAwaitTransferRestartTimeout: "ProviderEventAwaitTransferRestartTimeout", +} + +func (e ProviderEvent) String() string { + str, ok := ProviderEvents[e] + if ok { + return str + } + return fmt.Sprintf("ProviderEventUnknown - %d", e) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/duplicate_blocks.txt b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/duplicate_blocks.txt new file mode 100644 index 00000000000..53695d7b95f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/duplicate_blocks.txt @@ -0,0 +1 @@ 
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd
dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd
dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd
ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/payload.txt b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/payload.txt new file mode 100644 index 00000000000..fd4a2f3c1ff --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/payload.txt @@ -0,0 +1,49 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. + +Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. 
Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. + +Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. + +Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus. + +Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. 
Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. + +Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. + +Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. 
Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. + +Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. + +Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. + +Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. 
Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem. + +Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. + +Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. 
Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. + +Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. + +Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. + +Auctor augue mauris augue neque gravida. 
Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. + +Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. + +Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. + +Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. 
Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. + +Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. + +Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. 
Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. + +Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. + +Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. + +Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. 
Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat. + +Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum. + +Eros in cursus turpis massa tincidunt dui ut. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed. 
\ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/payload2.txt b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/payload2.txt new file mode 100644 index 00000000000..16fb150f5b2 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/fixtures/payload2.txt @@ -0,0 +1,49 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. + +Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. + +Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. 
Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. + +Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus. + +Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. 
Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. + +Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. + +Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. 
+ +Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. + +Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. + +Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. 
Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem. + +Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. + +Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. 
+ +Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. + +Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. + +Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. 
Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. + +Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. + +Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. + +Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. 
Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. + +Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. + +Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. 
+ +Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. + +Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. + +Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. 
Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat. + +Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum. + +Eros in cursus turpis massa tincidunt dui ut. Aarsh shah is simply an amazing person. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed. 
\ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder.go new file mode 100644 index 00000000000..a57ca7b3a14 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder.go @@ -0,0 +1,55 @@ +/* +Package blockrecorder provides utilities to record locations of CIDs to a +temporary metadata file, since writing a CAR happens BEFORE we actually hand off for sealing. +The metadata file is later used to populate the PieceStore +*/ +package blockrecorder + +import ( + "bufio" + "io" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-car" +) + +//go:generate cbor-gen-for PieceBlockMetadata + +// PieceBlockMetadata is a record of where a given CID lives in a piece, +// in terms of its offset and size +type PieceBlockMetadata struct { + CID cid.Cid + Offset uint64 + Size uint64 +} + +// RecordEachBlockTo returns an OnNewCarBlockFunc that records the exact +// location of a given block's data in a CAR file, and writes that data +// to the given writer +func RecordEachBlockTo(out io.Writer) car.OnNewCarBlockFunc { + return func(block car.Block) error { + pbMetadata := &PieceBlockMetadata{ + CID: block.BlockCID, + Offset: block.Offset + block.Size - uint64(len(block.Data)), + Size: uint64(len(block.Data)), + } + return pbMetadata.MarshalCBOR(out) + } +} + +// ReadBlockMetadata reads previously recorded block metadata +func ReadBlockMetadata(input io.Reader) ([]PieceBlockMetadata, error) { + var metadatas []PieceBlockMetadata + buf := bufio.NewReaderSize(input, 16) + for { + var nextMetadata PieceBlockMetadata + err := nextMetadata.UnmarshalCBOR(buf) + if err != nil { + if err != io.EOF { + return nil, err + } + return metadatas, nil + } + metadatas = append(metadatas, nextMetadata) + } +} diff --git 
a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder_cbor_gen.go new file mode 100644 index 00000000000..416931d41df --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder_cbor_gen.go @@ -0,0 +1,120 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package blockrecorder + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufPieceBlockMetadata = []byte{131} + +func (t *PieceBlockMetadata) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufPieceBlockMetadata); err != nil { + return err + } + + // t.CID (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.CID); err != nil { + return xerrors.Errorf("failed to write cid field t.CID: %w", err) + } + + // t.Offset (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.Size (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + return nil +} + +func (t *PieceBlockMetadata) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceBlockMetadata{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.CID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if 
err != nil { + return xerrors.Errorf("failed to read cid field t.CID: %w", err) + } + + t.CID = c + + } + // t.Offset (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = uint64(extra) + + } + // t.Size (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = uint64(extra) + + } + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder_test.go new file mode 100644 index 00000000000..884d621882f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/blockrecorder/blockrecorder_test.go @@ -0,0 +1,80 @@ +package blockrecorder_test + +import ( + "bytes" + "context" + "testing" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipld/go-car" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder" +) + +func TestBlockRecording(t *testing.T) { + testData := shared_testutil.NewTestIPLDTree() + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + node := ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { + efsb.Insert("linkedMap", + ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreAll(ssb.ExploreRecursiveEdge()))) + }).Node() + + ctx := context.Background() + sc := car.NewSelectiveCar(ctx, testData, []car.Dag{ + car.Dag{ + Root: 
testData.RootNodeLnk.(cidlink.Link).Cid, + Selector: node, + }, + }) + + carBuf := new(bytes.Buffer) + blockLocationBuf := new(bytes.Buffer) + err := sc.Write(carBuf, blockrecorder.RecordEachBlockTo(blockLocationBuf)) + require.NoError(t, err) + + metadata, err := blockrecorder.ReadBlockMetadata(blockLocationBuf) + require.NoError(t, err) + + blks := []blocks.Block{ + testData.LeafAlphaBlock, + testData.MiddleMapBlock, + testData.RootBlock, + } + carBytes := carBuf.Bytes() + for _, blk := range blks { + cid := blk.Cid() + var found bool + var metadatum blockrecorder.PieceBlockMetadata + for _, testMetadatum := range metadata { + if testMetadatum.CID.Equals(cid) { + metadatum = testMetadatum + found = true + break + } + } + require.True(t, found) + testBuf := carBytes[metadatum.Offset : metadatum.Offset+metadatum.Size] + require.Equal(t, blk.RawData(), testBuf) + } + missingBlks := []blocks.Block{ + testData.LeafBetaBlock, + testData.MiddleListBlock, + } + for _, blk := range missingBlks { + cid := blk.Cid() + var found bool + for _, testMetadatum := range metadata { + if testMetadatum.CID.Equals(cid) { + found = true + break + } + } + require.False(t, found) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client.go new file mode 100644 index 00000000000..027034196e9 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client.go @@ -0,0 +1,613 @@ +package storageimpl + +import ( + "context" + "fmt" + "time" + + "github.com/hannahhoward/go-pubsub" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + datatransfer "github.com/filecoin-project/go-data-transfer" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versionedfsm 
"github.com/filecoin-project/go-ds-versioning/pkg/fsm" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-statemachine/fsm" + + discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +var log = logging.Logger("storagemarket_impl") + +// DefaultPollingInterval is the frequency with which we query the provider for a status update +const DefaultPollingInterval = 30 * time.Second + +// DefaultMaxTraversalLinks is the maximum number of links to traverse during CommP calculation +// before returning an error +const DefaultMaxTraversalLinks = 2 << 29 + +var _ storagemarket.StorageClient = &Client{} + +// Client is the production implementation of the StorageClient interface +type Client struct { + net network.StorageMarketNetwork + + dataTransfer datatransfer.Manager + discovery *discoveryimpl.Local + node storagemarket.StorageClientNode + pubSub *pubsub.PubSub + readySub *pubsub.PubSub + statemachines fsm.Group + migrateStateMachines func(context.Context) error + pollingInterval time.Duration + maxTraversalLinks uint64 + + unsubDataTransfer datatransfer.Unsubscribe + + bstores storagemarket.BlockstoreAccessor +} + +// 
StorageClientOption allows custom configuration of a storage client +type StorageClientOption func(c *Client) + +// DealPollingInterval sets the interval at which this client will query the Provider for deal state while +// waiting for deal acceptance +func DealPollingInterval(t time.Duration) StorageClientOption { + return func(c *Client) { + c.pollingInterval = t + } +} + +// MaxTraversalLinks sets the maximum number of links in a DAG to traverse when calculating CommP, +// sets a budget that limits the depth and density of a DAG that can be traversed +func MaxTraversalLinks(m uint64) StorageClientOption { + return func(c *Client) { + c.maxTraversalLinks = m + } +} + +// NewClient creates a new storage client +func NewClient( + net network.StorageMarketNetwork, + dataTransfer datatransfer.Manager, + discovery *discoveryimpl.Local, + ds datastore.Batching, + scn storagemarket.StorageClientNode, + bstores storagemarket.BlockstoreAccessor, + options ...StorageClientOption, +) (*Client, error) { + c := &Client{ + net: net, + dataTransfer: dataTransfer, + discovery: discovery, + node: scn, + pubSub: pubsub.New(clientDispatcher), + readySub: pubsub.New(shared.ReadyDispatcher), + pollingInterval: DefaultPollingInterval, + maxTraversalLinks: DefaultMaxTraversalLinks, + bstores: bstores, + } + storageMigrations, err := migrations.ClientMigrations.Build() + if err != nil { + return nil, err + } + c.statemachines, c.migrateStateMachines, err = newClientStateMachine( + ds, + &clientDealEnvironment{c}, + c.dispatch, + storageMigrations, + versioning.VersionKey("1"), + ) + if err != nil { + return nil, err + } + + c.Configure(options...) 
+ + // register a data transfer event handler -- this will send events to the state machines based on DT events + c.unsubDataTransfer = dataTransfer.SubscribeToEvents(dtutils.ClientDataTransferSubscriber(c.statemachines)) + + err = dataTransfer.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, requestvalidation.NewUnifiedRequestValidator(nil, &clientPullDeals{c})) + if err != nil { + return nil, err + } + + err = dataTransfer.RegisterTransportConfigurer(&requestvalidation.StorageDataTransferVoucher{}, dtutils.TransportConfigurer(&clientStoreGetter{c})) + if err != nil { + return nil, err + } + + return c, nil +} + +// Start initializes deal processing on a StorageClient, runs migrations and restarts +// in progress deals +func (c *Client) Start(ctx context.Context) error { + go func() { + err := c.start(ctx) + if err != nil { + log.Error(err.Error()) + } + }() + return nil +} + +// OnReady registers a listener for when the client has finished starting up +func (c *Client) OnReady(ready shared.ReadyFunc) { + c.readySub.Subscribe(ready) +} + +// Stop ends deal processing on a StorageClient +func (c *Client) Stop() error { + c.unsubDataTransfer() + return c.statemachines.Stop(context.TODO()) +} + +// ListProviders queries chain state and returns active storage providers +func (c *Client) ListProviders(ctx context.Context) (<-chan storagemarket.StorageProviderInfo, error) { + tok, _, err := c.node.GetChainHead(ctx) + if err != nil { + return nil, err + } + + providers, err := c.node.ListStorageProviders(ctx, tok) + if err != nil { + return nil, err + } + + out := make(chan storagemarket.StorageProviderInfo) + + go func() { + defer close(out) + for _, p := range providers { + select { + case out <- *p: + case <-ctx.Done(): + return + } + } + }() + + return out, nil +} + +// ListLocalDeals lists deals initiated by this storage client +func (c *Client) ListLocalDeals(ctx context.Context) ([]storagemarket.ClientDeal, error) { + var out 
[]storagemarket.ClientDeal + if err := c.statemachines.List(&out); err != nil { + return nil, err + } + return out, nil +} + +// GetLocalDeal lists deals that are in progress or rejected +func (c *Client) GetLocalDeal(ctx context.Context, cid cid.Cid) (storagemarket.ClientDeal, error) { + var out storagemarket.ClientDeal + if err := c.statemachines.Get(cid).Get(&out); err != nil { + return storagemarket.ClientDeal{}, err + } + return out, nil +} + +// GetAsk queries a provider for its current storage ask +// +// The client creates a new `StorageAskStream` for the chosen peer ID, +// and calls WriteAskRequest on it, which constructs a message and writes it to the Ask stream. +// When it receives a response, it verifies the signature and returns the validated +// StorageAsk if successful +func (c *Client) GetAsk(ctx context.Context, info storagemarket.StorageProviderInfo) (*storagemarket.StorageAsk, error) { + if len(info.Addrs) > 0 { + c.net.AddAddrs(info.PeerID, info.Addrs) + } + s, err := c.net.NewAskStream(ctx, info.PeerID) + if err != nil { + return nil, xerrors.Errorf("failed to open stream to miner: %w", err) + } + defer s.Close() //nolint + + request := network.AskRequest{Miner: info.Address} + if err := s.WriteAskRequest(request); err != nil { + return nil, xerrors.Errorf("failed to send ask request: %w", err) + } + + out, origBytes, err := s.ReadAskResponse() + if err != nil { + return nil, xerrors.Errorf("failed to read ask response: %w", err) + } + + if out.Ask == nil { + return nil, xerrors.Errorf("got no ask back") + } + + if out.Ask.Ask.Miner != info.Address { + return nil, xerrors.Errorf("got back ask for wrong miner") + } + + tok, _, err := c.node.GetChainHead(ctx) + if err != nil { + return nil, err + } + + isValid, err := c.node.VerifySignature(ctx, *out.Ask.Signature, info.Worker, origBytes, tok) + if err != nil { + return nil, err + } + + if !isValid { + return nil, xerrors.Errorf("ask was not properly signed") + } + + return out.Ask.Ask, nil +} 
+ +// GetProviderDealState queries a provider for the current state of a client's deal +func (c *Client) GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*storagemarket.ProviderDealState, error) { + var deal storagemarket.ClientDeal + err := c.statemachines.Get(proposalCid).Get(&deal) + if err != nil { + return nil, xerrors.Errorf("could not get client deal state: %w", err) + } + + s, err := c.net.NewDealStatusStream(ctx, deal.Miner) + if err != nil { + return nil, xerrors.Errorf("failed to open stream to miner: %w", err) + } + defer s.Close() //nolint + + buf, err := cborutil.Dump(&deal.ProposalCid) + if err != nil { + return nil, xerrors.Errorf("failed serialize deal status request: %w", err) + } + + signature, err := c.node.SignBytes(ctx, deal.Proposal.Client, buf) + if err != nil { + return nil, xerrors.Errorf("failed to sign deal status request: %w", err) + } + + if err := s.WriteDealStatusRequest(network.DealStatusRequest{Proposal: proposalCid, Signature: *signature}); err != nil { + return nil, xerrors.Errorf("failed to send deal status request: %w", err) + } + + resp, origBytes, err := s.ReadDealStatusResponse() + if err != nil { + return nil, xerrors.Errorf("failed to read deal status response: %w", err) + } + + valid, err := c.verifyStatusResponseSignature(ctx, deal.MinerWorker, resp, origBytes) + if err != nil { + return nil, err + } + + if !valid { + return nil, xerrors.Errorf("invalid deal status response signature") + } + + return &resp.DealState, nil +} + +// ProposeStorageDeal initiates the storage deal flow, which involves multiple requests and responses. +// +// This function is called after ListProviders and QueryAsk are used to identify an appropriate provider +// to store data. The parameters passed to ProposeStorageDeal should match those returned by the miner from +// QueryAsk to ensure the greatest likelihood the provider will accept the deal. +// +// When called, the client takes the following actions: +// +// 1. 
Calculates the PieceCID for this deal from the given PayloadCID. (by writing the payload to a CAR file then calculating +// a merkle root for the resulting data) +// +// 2. Constructs a `DealProposal` (spec-actors type) with deal terms +// +// 3. Signs the `DealProposal` to make a ClientDealProposal +// +// 4. Gets the CID for the ClientDealProposal +// +// 5. Construct a ClientDeal to track the state of this deal. +// +// 6. Tells its statemachine to begin tracking the deal state by the CID of the ClientDealProposal +// +// 7. Triggers a `ClientEventOpen` event on its statemachine. +// +// 8. Records the Provider as a possible peer for retrieving this data in the future +// +// From then on, the statemachine controls the deal flow in the client. Other components may listen for events in this flow by calling +// `SubscribeToEvents` on the Client. The Client also provides access to the node and network and other functionality through +// its implementation of the Client FSM's ClientDealEnvironment. 
+// +// Documentation of the client state machine can be found at https://godoc.org/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates +func (c *Client) ProposeStorageDeal(ctx context.Context, params storagemarket.ProposeStorageDealParams) (*storagemarket.ProposeStorageDealResult, error) { + err := c.addMultiaddrs(ctx, params.Info.Address) + if err != nil { + return nil, xerrors.Errorf("looking up addresses: %w", err) + } + + bs, err := c.bstores.Get(params.Data.Root) + if err != nil { + return nil, xerrors.Errorf("failed to get blockstore for imported root %s: %w", params.Data.Root, err) + } + + commP, pieceSize, err := clientutils.CommP(ctx, bs, params.Data, c.maxTraversalLinks) + if err != nil { + return nil, xerrors.Errorf("computing commP failed: %w", err) + } + + if uint64(pieceSize.Padded()) > params.Info.SectorSize { + return nil, fmt.Errorf("cannot propose a deal whose piece size (%d) is greater than sector size (%d)", pieceSize.Padded(), params.Info.SectorSize) + } + + pcMin := params.Collateral + if pcMin.Int == nil || pcMin.IsZero() { + pcMin, _, err = c.node.DealProviderCollateralBounds(ctx, pieceSize.Padded(), params.VerifiedDeal) + if err != nil { + return nil, xerrors.Errorf("computing deal provider collateral bound failed: %w", err) + } + } + + label, err := clientutils.LabelField(params.Data.Root) + if err != nil { + return nil, xerrors.Errorf("creating label field in proposal: %w", err) + } + + dealProposal := market.DealProposal{ + PieceCID: commP, + PieceSize: pieceSize.Padded(), + Client: params.Addr, + Provider: params.Info.Address, + Label: label, + StartEpoch: params.StartEpoch, + EndEpoch: params.EndEpoch, + StoragePricePerEpoch: params.Price, + ProviderCollateral: pcMin, + ClientCollateral: big.Zero(), + VerifiedDeal: params.VerifiedDeal, + } + + clientDealProposal, err := c.node.SignProposal(ctx, params.Addr, dealProposal) + if err != nil { + return nil, xerrors.Errorf("signing deal proposal failed: %w", err) + } 
+ + proposalNd, err := cborutil.AsIpld(clientDealProposal) + if err != nil { + return nil, xerrors.Errorf("getting proposal node failed: %w", err) + } + + deal := &storagemarket.ClientDeal{ + ProposalCid: proposalNd.Cid(), + ClientDealProposal: *clientDealProposal, + State: storagemarket.StorageDealUnknown, + Miner: params.Info.PeerID, + MinerWorker: params.Info.Worker, + DataRef: params.Data, + FastRetrieval: params.FastRetrieval, + DealStages: storagemarket.NewDealStages(), + CreationTime: curTime(), + } + + err = c.statemachines.Begin(proposalNd.Cid(), deal) + if err != nil { + return nil, xerrors.Errorf("setting up deal tracking: %w", err) + } + + err = c.statemachines.Send(deal.ProposalCid, storagemarket.ClientEventOpen) + if err != nil { + return nil, xerrors.Errorf("initializing state machine: %w", err) + } + + return &storagemarket.ProposeStorageDealResult{ + ProposalCid: deal.ProposalCid, + }, c.discovery.AddPeer(ctx, params.Data.Root, retrievalmarket.RetrievalPeer{ + Address: dealProposal.Provider, + ID: deal.Miner, + PieceCID: &commP, + }) +} + +func curTime() cbg.CborTime { + now := time.Now() + return cbg.CborTime(time.Unix(0, now.UnixNano()).UTC()) +} + +// GetPaymentEscrow returns the current funds available for deal payment +func (c *Client) GetPaymentEscrow(ctx context.Context, addr address.Address) (storagemarket.Balance, error) { + tok, _, err := c.node.GetChainHead(ctx) + if err != nil { + return storagemarket.Balance{}, err + } + + return c.node.GetBalance(ctx, addr, tok) +} + +// AddPaymentEscrow adds funds for storage deals +func (c *Client) AddPaymentEscrow(ctx context.Context, addr address.Address, amount abi.TokenAmount) error { + done := make(chan error, 1) + + mcid, err := c.node.AddFunds(ctx, addr, amount) + if err != nil { + return err + } + + err = c.node.WaitForMessage(ctx, mcid, func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error { + if err != nil { + done <- xerrors.Errorf("AddFunds errored: %w", err) + } 
else if code != exitcode.Ok { + done <- xerrors.Errorf("AddFunds error, exit code: %s", code.String()) + } else { + done <- nil + } + return nil + }) + + if err != nil { + return err + } + + return <-done +} + +// SubscribeToEvents allows another component to listen for events on the StorageClient +// in order to track deals as they progress through the deal flow +func (c *Client) SubscribeToEvents(subscriber storagemarket.ClientSubscriber) shared.Unsubscribe { + return shared.Unsubscribe(c.pubSub.Subscribe(subscriber)) +} + +// PollingInterval is a getter for the polling interval option +func (c *Client) PollingInterval() time.Duration { + return c.pollingInterval +} + +// Configure applies the given list of StorageClientOptions after a StorageClient +// is initialized +func (c *Client) Configure(options ...StorageClientOption) { + for _, option := range options { + option(c) + } +} + +func (c *Client) start(ctx context.Context) error { + err := c.migrateStateMachines(ctx) + publishErr := c.readySub.Publish(err) + if publishErr != nil { + log.Warnf("Publish storage client ready event: %s", err.Error()) + } + if err != nil { + return fmt.Errorf("Migrating storage client state machines: %w", err) + } + if err := c.restartDeals(ctx); err != nil { + return fmt.Errorf("Failed to restart deals: %w", err) + } + return nil +} + +func (c *Client) restartDeals(ctx context.Context) error { + var deals []storagemarket.ClientDeal + err := c.statemachines.List(&deals) + if err != nil { + return err + } + + for _, deal := range deals { + if c.statemachines.IsTerminated(deal) { + continue + } + + err = c.addMultiaddrs(ctx, deal.Proposal.Provider) + if err != nil { + return err + } + + err = c.statemachines.Send(deal.ProposalCid, storagemarket.ClientEventRestart) + if err != nil { + return err + } + } + return nil +} + +func (c *Client) dispatch(eventName fsm.EventName, deal fsm.StateType) { + evt, ok := eventName.(storagemarket.ClientEvent) + if !ok { + log.Errorf("dropped bad 
event %s", eventName) + } + realDeal, ok := deal.(storagemarket.ClientDeal) + if !ok { + log.Errorf("not a ClientDeal %v", deal) + } + pubSubEvt := internalClientEvent{evt, realDeal} + + if err := c.pubSub.Publish(pubSubEvt); err != nil { + log.Errorf("failed to publish event %d", evt) + } +} + +func (c *Client) verifyStatusResponseSignature(ctx context.Context, miner address.Address, response network.DealStatusResponse, origBytes []byte) (bool, error) { + tok, _, err := c.node.GetChainHead(ctx) + if err != nil { + return false, xerrors.Errorf("getting chain head: %w", err) + } + + valid, err := c.node.VerifySignature(ctx, response.Signature, miner, origBytes, tok) + if err != nil { + return false, xerrors.Errorf("validating signature: %w", err) + } + + return valid, nil +} + +func (c *Client) addMultiaddrs(ctx context.Context, providerAddr address.Address) error { + tok, _, err := c.node.GetChainHead(ctx) + if err != nil { + return err + } + minfo, err := c.node.GetMinerInfo(ctx, providerAddr, tok) + if err != nil { + return err + } + + if len(minfo.Addrs) > 0 { + c.net.AddAddrs(minfo.PeerID, minfo.Addrs) + } + + return nil +} + +func newClientStateMachine(ds datastore.Batching, env fsm.Environment, notifier fsm.Notifier, storageMigrations versioning.VersionedMigrationList, target versioning.VersionKey) (fsm.Group, func(context.Context) error, error) { + return versionedfsm.NewVersionedFSM(ds, fsm.Parameters{ + Environment: env, + StateType: storagemarket.ClientDeal{}, + StateKeyField: "State", + Events: clientstates.ClientEvents, + StateEntryFuncs: clientstates.ClientStateEntryFuncs, + FinalityStates: clientstates.ClientFinalityStates, + Notifier: notifier, + }, storageMigrations, target) +} + +type internalClientEvent struct { + evt storagemarket.ClientEvent + deal storagemarket.ClientDeal +} + +func clientDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error { + ie, ok := evt.(internalClientEvent) + if !ok { + return xerrors.New("wrong type of event") + } 
+ cb, ok := fn.(storagemarket.ClientSubscriber) + if !ok { + return xerrors.New("wrong type of event") + } + log.Debugw("process storage client listeners", "name", storagemarket.ClientEvents[ie.evt], "proposal cid", ie.deal.ProposalCid) + cb(ie.evt, ie.deal) + return nil +} + +// ClientFSMParameterSpec is a valid set of parameters for a client deal FSM - used in doc generation +var ClientFSMParameterSpec = fsm.Parameters{ + Environment: &clientDealEnvironment{}, + StateType: storagemarket.ClientDeal{}, + StateKeyField: "State", + Events: clientstates.ClientEvents, + StateEntryFuncs: clientstates.ClientStateEntryFuncs, + FinalityStates: clientstates.ClientFinalityStates, +} + +var _ clientstates.ClientDealEnvironment = &clientDealEnvironment{} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client_environments.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client_environments.go new file mode 100644 index 00000000000..11094acb663 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client_environments.go @@ -0,0 +1,92 @@ +package storageimpl + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +// ------- +// clientDealEnvironment +// ------- + +type clientDealEnvironment struct { + c *Client +} + +func (c *clientDealEnvironment) NewDealStream(ctx context.Context, p peer.ID) (network.StorageDealStream, error) { + return c.c.net.NewDealStream(ctx, p) +} + +func (c *clientDealEnvironment) Node() storagemarket.StorageClientNode { + return c.c.node +} + +func (c *clientDealEnvironment) CleanBlockstore(payloadCid cid.Cid) error { + return 
c.c.bstores.Done(payloadCid) +} + +func (c *clientDealEnvironment) StartDataTransfer(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, + error) { + chid, err := c.c.dataTransfer.OpenPushDataChannel(ctx, to, voucher, baseCid, selector) + return chid, err +} + +func (c *clientDealEnvironment) RestartDataTransfer(ctx context.Context, channelId datatransfer.ChannelID) error { + return c.c.dataTransfer.RestartDataTransferChannel(ctx, channelId) +} + +func (c *clientDealEnvironment) GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*storagemarket.ProviderDealState, error) { + return c.c.GetProviderDealState(ctx, proposalCid) +} + +func (c *clientDealEnvironment) PollingInterval() time.Duration { + return c.c.pollingInterval +} + +type clientStoreGetter struct { + c *Client +} + +func (csg *clientStoreGetter) Get(proposalCid cid.Cid) (bstore.Blockstore, error) { + var deal storagemarket.ClientDeal + err := csg.c.statemachines.Get(proposalCid).Get(&deal) + if err != nil { + return nil, xerrors.Errorf("failed to get client deal state: %w", err) + } + + bs, err := csg.c.bstores.Get(deal.DataRef.Root) + if err != nil { + return nil, xerrors.Errorf("failed to get blockstore for %s: %w", proposalCid, err) + } + + return bs, nil +} + +func (c *clientDealEnvironment) TagPeer(peer peer.ID, tag string) { + c.c.net.TagPeer(peer, tag) +} + +func (c *clientDealEnvironment) UntagPeer(peer peer.ID, tag string) { + c.c.net.UntagPeer(peer, tag) +} + +type clientPullDeals struct { + c *Client +} + +func (cpd *clientPullDeals) Get(proposalCid cid.Cid) (storagemarket.ClientDeal, error) { + var deal storagemarket.ClientDeal + err := cpd.c.statemachines.GetSync(context.TODO(), proposalCid, &deal) + return deal, err +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client_test.go new file mode 100644 index 
00000000000..2601abc400d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/client_test.go @@ -0,0 +1,164 @@ +package storageimpl_test + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/exp/rand" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness/dependencies" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" +) + +var noOpDelay = testnodes.DelayFakeCommonNode{} + +func TestClient_Configure(t *testing.T) { + c := &storageimpl.Client{} + assert.Equal(t, time.Duration(0), c.PollingInterval()) + + c.Configure(storageimpl.DealPollingInterval(123 * time.Second)) + + assert.Equal(t, 123*time.Second, c.PollingInterval()) +} + +func TestClient_Migrations(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + deps := dependencies.NewDependenciesWithTestData(t, ctx, shared_testutil.NewLibp2pTestData(ctx, t), testnodes.NewStorageMarketState(), "", noOpDelay, + noOpDelay) + + clientDs := namespace.Wrap(deps.TestData.Ds1, datastore.NewKey("/deals/client")) + + 
numDeals := 5 + dealProposals := make([]*market.ClientDealProposal, numDeals) + proposalCids := make([]cid.Cid, numDeals) + addFundsCids := make([]*cid.Cid, numDeals) + miners := make([]peer.ID, numDeals) + dealIDs := make([]abi.DealID, numDeals) + payloadCids := make([]cid.Cid, numDeals) + messages := make([]string, numDeals) + publishMessages := make([]*cid.Cid, numDeals) + fastRetrievals := make([]bool, numDeals) + storeIDs := make([]*uint64, numDeals) + fundsReserveds := make([]abi.TokenAmount, numDeals) + creationTimes := make([]cbg.CborTime, numDeals) + + for i := 0; i < numDeals; i++ { + dealProposals[i] = shared_testutil.MakeTestClientDealProposal() + proposalNd, err := cborutil.AsIpld(dealProposals[i]) + require.NoError(t, err) + proposalCids[i] = proposalNd.Cid() + payloadCids[i] = shared_testutil.GenerateCids(1)[0] + storeID := rand.Uint64() + storeIDs[i] = &storeID + messages[i] = string(shared_testutil.RandomBytes(20)) + fundsReserveds[i] = big.NewInt(rand.Int63()) + fastRetrievals[i] = rand.Intn(2) == 1 + publishMessage := shared_testutil.GenerateCids(1)[0] + publishMessages[i] = &publishMessage + addFundsCid := shared_testutil.GenerateCids(1)[0] + addFundsCids[i] = &addFundsCid + dealIDs[i] = abi.DealID(rand.Uint64()) + miners[i] = shared_testutil.GeneratePeers(1)[0] + now := time.Now() + creationTimes[i] = cbg.CborTime(time.Unix(0, now.UnixNano()).UTC()) + timeBuf := new(bytes.Buffer) + err = creationTimes[i].MarshalCBOR(timeBuf) + require.NoError(t, err) + err = cborutil.ReadCborRPC(timeBuf, &creationTimes[i]) + require.NoError(t, err) + deal := migrations.ClientDeal0{ + ClientDealProposal: *dealProposals[i], + ProposalCid: proposalCids[i], + AddFundsCid: addFundsCids[i], + State: storagemarket.StorageDealExpired, + Miner: miners[i], + MinerWorker: address.TestAddress2, + DealID: dealIDs[i], + DataRef: &migrations.DataRef0{ + TransferType: storagemarket.TTGraphsync, + Root: payloadCids[i], + }, + Message: messages[i], + PublishMessage: 
publishMessages[i], + SlashEpoch: abi.ChainEpoch(0), + PollRetryCount: 0, + PollErrorCount: 0, + FastRetrieval: fastRetrievals[i], + StoreID: storeIDs[i], + FundsReserved: fundsReserveds[i], + CreationTime: creationTimes[i], + } + buf := new(bytes.Buffer) + err = deal.MarshalCBOR(buf) + require.NoError(t, err) + err = clientDs.Put(ctx, datastore.NewKey(deal.ProposalCid.String()), buf.Bytes()) + require.NoError(t, err) + } + client, err := storageimpl.NewClient( + network.NewFromLibp2pHost(deps.TestData.Host1, network.RetryParameters(0, 0, 0, 0)), + deps.DTClient, + deps.PeerResolver, + clientDs, + deps.ClientNode, + shared_testutil.NewTestStorageBlockstoreAccessor(), + storageimpl.DealPollingInterval(0), + ) + require.NoError(t, err) + + shared_testutil.StartAndWaitForReady(ctx, t, client) + deals, err := client.ListLocalDeals(ctx) + require.NoError(t, err) + for i := 0; i < numDeals; i++ { + var deal storagemarket.ClientDeal + for _, testDeal := range deals { + if testDeal.DataRef.Root.Equals(payloadCids[i]) { + deal = testDeal + break + } + } + expectedDeal := storagemarket.ClientDeal{ + ClientDealProposal: *dealProposals[i], + ProposalCid: proposalCids[i], + AddFundsCid: addFundsCids[i], + State: storagemarket.StorageDealExpired, + Miner: miners[i], + MinerWorker: address.TestAddress2, + DealID: dealIDs[i], + DataRef: &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: payloadCids[i], + }, + Message: messages[i], + PublishMessage: publishMessages[i], + SlashEpoch: abi.ChainEpoch(0), + PollRetryCount: 0, + PollErrorCount: 0, + FastRetrieval: fastRetrievals[i], + FundsReserved: fundsReserveds[i], + CreationTime: creationTimes[i], + } + require.Equal(t, expectedDeal, deal) + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_fsm.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_fsm.go new file mode 100644 index 00000000000..39aeacb0361 --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_fsm.go @@ -0,0 +1,300 @@ +package clientstates + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// ClientEvents are the events that can happen in a storage client +var ClientEvents = fsm.Events{ + fsm.Event(storagemarket.ClientEventOpen). + From(storagemarket.StorageDealUnknown).To(storagemarket.StorageDealReserveClientFunds), + fsm.Event(storagemarket.ClientEventFundingInitiated). + From(storagemarket.StorageDealReserveClientFunds).To(storagemarket.StorageDealClientFunding). + Action(func(deal *storagemarket.ClientDeal, mcid cid.Cid) error { + deal.AddFundsCid = &mcid + deal.AddLog("reserving funds for storage deal, message cid: <%s>", mcid) + return nil + }), + fsm.Event(storagemarket.ClientEventReserveFundsFailed). + FromMany(storagemarket.StorageDealClientFunding, storagemarket.StorageDealReserveClientFunds).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("adding market funds failed: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventFundsReserved). + From(storagemarket.StorageDealReserveClientFunds).ToJustRecord(). + Action(func(deal *storagemarket.ClientDeal, fundsReserved abi.TokenAmount) error { + if deal.FundsReserved.Nil() { + deal.FundsReserved = fundsReserved + } else { + deal.FundsReserved = big.Add(deal.FundsReserved, fundsReserved) + } + deal.AddLog("funds reserved, amount <%s>", fundsReserved) + return nil + }), + fsm.Event(storagemarket.ClientEventFundsReleased). 
+ FromMany(storagemarket.StorageDealProposalAccepted, storagemarket.StorageDealFailing).ToJustRecord(). + Action(func(deal *storagemarket.ClientDeal, fundsReleased abi.TokenAmount) error { + deal.FundsReserved = big.Subtract(deal.FundsReserved, fundsReleased) + deal.AddLog("funds released, amount <%s>", fundsReleased) + return nil + }), + fsm.Event(storagemarket.ClientEventFundingComplete). + FromMany(storagemarket.StorageDealReserveClientFunds, storagemarket.StorageDealClientFunding).To(storagemarket.StorageDealFundsReserved), + fsm.Event(storagemarket.ClientEventWriteProposalFailed). + From(storagemarket.StorageDealFundsReserved).To(storagemarket.StorageDealError). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("sending proposal to storage provider failed: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventReadResponseFailed). + From(storagemarket.StorageDealFundsReserved).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("error reading Response message from provider: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventResponseVerificationFailed). + From(storagemarket.StorageDealFundsReserved).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal) error { + deal.Message = "unable to verify signature on deal response" + deal.AddLog(deal.Message) + return nil + }), + + fsm.Event(storagemarket.ClientEventInitiateDataTransfer). + From(storagemarket.StorageDealFundsReserved).To(storagemarket.StorageDealStartDataTransfer). + Action(func(deal *storagemarket.ClientDeal) error { + deal.AddLog("opening data transfer to storage provider") + return nil + }), + fsm.Event(storagemarket.ClientEventUnexpectedDealState). + From(storagemarket.StorageDealFundsReserved).To(storagemarket.StorageDealFailing). 
+ Action(func(deal *storagemarket.ClientDeal, status storagemarket.StorageDealStatus, providerMessage string) error { + deal.Message = xerrors.Errorf("unexpected deal status while waiting for data request: %d (%s). Provider message: %s", status, storagemarket.DealStates[status], providerMessage).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventDataTransferFailed). + FromMany(storagemarket.StorageDealStartDataTransfer, storagemarket.StorageDealTransferring, storagemarket.StorageDealTransferQueued). + To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("failed to complete data transfer: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + + fsm.Event(storagemarket.ClientEventDataTransferRestartFailed).From(storagemarket.StorageDealClientTransferRestart). + To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("failed to restart data transfer: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + + // The client has sent a push request to the provider, and in response the provider has + // opened a request for data to the client. The transfer is in the client's queue. + fsm.Event(storagemarket.ClientEventDataTransferQueued). + FromMany(storagemarket.StorageDealStartDataTransfer).To(storagemarket.StorageDealTransferQueued). + Action(func(deal *storagemarket.ClientDeal, channelId datatransfer.ChannelID) error { + deal.AddLog("provider data transfer request added to client's queue: channel id <%s>", channelId) + return nil + }), + + fsm.Event(storagemarket.ClientEventDataTransferInitiated). + FromMany(storagemarket.StorageDealTransferQueued).To(storagemarket.StorageDealTransferring). 
+ Action(func(deal *storagemarket.ClientDeal, channelId datatransfer.ChannelID) error { + deal.TransferChannelID = &channelId + deal.AddLog("data transfer initiated on channel id <%s>", channelId) + return nil + }), + + fsm.Event(storagemarket.ClientEventDataTransferRestarted). + FromMany(storagemarket.StorageDealClientTransferRestart, storagemarket.StorageDealStartDataTransfer, storagemarket.StorageDealTransferQueued).To(storagemarket.StorageDealTransferring). + From(storagemarket.StorageDealTransferring).ToJustRecord(). + Action(func(deal *storagemarket.ClientDeal, channelId datatransfer.ChannelID) error { + deal.TransferChannelID = &channelId + deal.Message = "" + deal.AddLog("data transfer restarted on channel id <%s>", channelId) + return nil + }), + + fsm.Event(storagemarket.ClientEventDataTransferStalled). + FromMany(storagemarket.StorageDealTransferring, storagemarket.StorageDealTransferQueued). + To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("could not complete data transfer, could not connect to provider %s", deal.Miner).Error() + deal.AddLog(deal.Message) + return nil + }), + + fsm.Event(storagemarket.ClientEventDataTransferCancelled). + FromMany( + storagemarket.StorageDealStartDataTransfer, + storagemarket.StorageDealTransferring, + storagemarket.StorageDealClientTransferRestart, + storagemarket.StorageDealTransferQueued, + ). + To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal) error { + deal.Message = "data transfer cancelled" + deal.AddLog(deal.Message) + return nil + }), + + fsm.Event(storagemarket.ClientEventDataTransferComplete). + FromMany(storagemarket.StorageDealTransferring, storagemarket.StorageDealStartDataTransfer, storagemarket.StorageDealTransferQueued). + To(storagemarket.StorageDealCheckForAcceptance), + fsm.Event(storagemarket.ClientEventWaitForDealState). 
+ From(storagemarket.StorageDealCheckForAcceptance).ToNoChange(). + Action(func(deal *storagemarket.ClientDeal, pollError bool, providerState storagemarket.StorageDealStatus) error { + deal.PollRetryCount++ + if pollError { + deal.PollErrorCount++ + } + deal.Message = fmt.Sprintf("Provider state: %s", storagemarket.DealStates[providerState]) + switch storagemarket.DealStates[providerState] { + case "StorageDealVerifyData": + deal.AddLog("provider is verifying the data") + case "StorageDealPublish": + deal.AddLog("waiting for provider to publish the deal on-chain") // TODO: is that right? + case "StorageDealPublishing": + deal.AddLog("provider has submitted the deal on-chain and is waiting for confirmation") // TODO: is that right? + case "StorageDealProviderFunding": + deal.AddLog("waiting for provider to lock collateral on-chain") // TODO: is that right? + default: + deal.AddLog(deal.Message) + } + return nil + }), + fsm.Event(storagemarket.ClientEventResponseDealDidNotMatch). + From(storagemarket.StorageDealCheckForAcceptance).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal, responseCid cid.Cid, proposalCid cid.Cid) error { + deal.Message = xerrors.Errorf("miner responded to a wrong proposal: %s != %s", responseCid, proposalCid).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventDealRejected). + From(storagemarket.StorageDealCheckForAcceptance).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.ClientDeal, state storagemarket.StorageDealStatus, reason string) error { + deal.Message = xerrors.Errorf("deal failed: (State=%d) %s", state, reason).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventDealAccepted). + From(storagemarket.StorageDealCheckForAcceptance).To(storagemarket.StorageDealProposalAccepted). 
+ Action(func(deal *storagemarket.ClientDeal, publishMessage *cid.Cid) error { + deal.PublishMessage = publishMessage + deal.Message = "" + deal.AddLog("deal has been accepted by storage provider") + return nil + }), + fsm.Event(storagemarket.ClientEventStreamCloseError). + FromAny().To(storagemarket.StorageDealError). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("error attempting to close stream: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventDealPublishFailed). + From(storagemarket.StorageDealProposalAccepted).To(storagemarket.StorageDealError). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("error validating deal published: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventDealPublished). + From(storagemarket.StorageDealProposalAccepted).To(storagemarket.StorageDealAwaitingPreCommit). + Action(func(deal *storagemarket.ClientDeal, dealID abi.DealID) error { + deal.DealID = dealID + deal.AddLog("") + return nil + }), + fsm.Event(storagemarket.ClientEventDealPrecommitFailed). + From(storagemarket.StorageDealAwaitingPreCommit).To(storagemarket.StorageDealError). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("error waiting for deal pre-commit message to appear on chain: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventDealPrecommitted). + From(storagemarket.StorageDealAwaitingPreCommit).To(storagemarket.StorageDealSealing). + Action(func(deal *storagemarket.ClientDeal, sectorNumber abi.SectorNumber) error { + deal.SectorNumber = sectorNumber + deal.AddLog("deal pre-commit message has landed on chain") + return nil + }), + fsm.Event(storagemarket.ClientEventDealActivationFailed). + From(storagemarket.StorageDealSealing).To(storagemarket.StorageDealError). 
+ Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("error in deal activation: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventDealActivated). + FromMany(storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing). + To(storagemarket.StorageDealActive). + Action(func(deal *storagemarket.ClientDeal) error { + deal.AddLog("deal activated") + return nil + }), + fsm.Event(storagemarket.ClientEventDealSlashed). + From(storagemarket.StorageDealActive).To(storagemarket.StorageDealSlashed). + Action(func(deal *storagemarket.ClientDeal, slashEpoch abi.ChainEpoch) error { + deal.SlashEpoch = slashEpoch + deal.AddLog("deal slashed at epoch <%d>", slashEpoch) + return nil + }), + fsm.Event(storagemarket.ClientEventDealExpired). + From(storagemarket.StorageDealActive).To(storagemarket.StorageDealExpired), + fsm.Event(storagemarket.ClientEventDealCompletionFailed). + From(storagemarket.StorageDealActive).To(storagemarket.StorageDealError). + Action(func(deal *storagemarket.ClientDeal, err error) error { + deal.Message = xerrors.Errorf("error waiting for deal completion: %w", err).Error() + deal.AddLog(deal.Message) + return nil + }), + fsm.Event(storagemarket.ClientEventFailed). + From(storagemarket.StorageDealFailing).To(storagemarket.StorageDealError). + Action(func(deal *storagemarket.ClientDeal) error { + deal.AddLog("") + return nil + }), + fsm.Event(storagemarket.ClientEventRestart).From(storagemarket.StorageDealTransferring).To(storagemarket.StorageDealClientTransferRestart). 
+ FromAny().ToNoChange(), +} + +// ClientStateEntryFuncs are the handlers for different states in a storage client +var ClientStateEntryFuncs = fsm.StateEntryFuncs{ + storagemarket.StorageDealReserveClientFunds: ReserveClientFunds, + storagemarket.StorageDealClientFunding: WaitForFunding, + storagemarket.StorageDealFundsReserved: ProposeDeal, + storagemarket.StorageDealStartDataTransfer: InitiateDataTransfer, + storagemarket.StorageDealClientTransferRestart: RestartDataTransfer, + storagemarket.StorageDealCheckForAcceptance: CheckForDealAcceptance, + storagemarket.StorageDealProposalAccepted: ValidateDealPublished, + storagemarket.StorageDealAwaitingPreCommit: VerifyDealPreCommitted, + storagemarket.StorageDealSealing: VerifyDealActivated, + storagemarket.StorageDealActive: WaitForDealCompletion, + storagemarket.StorageDealFailing: FailDeal, +} + +// ClientFinalityStates are the states that terminate deal processing for a deal. +// When a client restarts, it restarts only deals that are not in a finality state. 
+var ClientFinalityStates = []fsm.StateKey{ + storagemarket.StorageDealSlashed, + storagemarket.StorageDealExpired, + storagemarket.StorageDealError, +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_states.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_states.go new file mode 100644 index 00000000000..77c756fa5bf --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_states.go @@ -0,0 +1,359 @@ +package clientstates + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-ipld-prime" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +var log = logging.Logger("storagemarket_impl") + +// MaxGraceEpochsForDealAcceptance is the maximum number of epochs we will wait for past the start epoch +// for the provider to publish a deal. +const MaxGraceEpochsForDealAcceptance = 10 + +// ClientDealEnvironment is an abstraction for interacting with +// dependencies from the storage client environment +type ClientDealEnvironment interface { + // CleanBlockstore cleans up the read-only CARv2 blockstore that provides random access on top of client deal data. + // It's important to do this when the client deal finishes successfully or errors out. 
+ CleanBlockstore(rootCid cid.Cid) error + Node() storagemarket.StorageClientNode + NewDealStream(ctx context.Context, p peer.ID) (network.StorageDealStream, error) + StartDataTransfer(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, error) + RestartDataTransfer(ctx context.Context, chid datatransfer.ChannelID) error + GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*storagemarket.ProviderDealState, error) + PollingInterval() time.Duration + network.PeerTagger +} + +// ClientStateEntryFunc is the type for all state entry functions on a storage client +type ClientStateEntryFunc func(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error + +// ReserveClientFunds attempts to reserve funds for this deal and ensure they are available in the Storage Market Actor +func ReserveClientFunds(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + node := environment.Node() + + mcid, err := node.ReserveFunds(ctx.Context(), deal.Proposal.Client, deal.Proposal.Client, deal.Proposal.ClientBalanceRequirement()) + if err != nil { + return ctx.Trigger(storagemarket.ClientEventReserveFundsFailed, err) + } + + _ = ctx.Trigger(storagemarket.ClientEventFundsReserved, deal.Proposal.ClientBalanceRequirement()) + + // if no message was sent, and there was no error, funds were already available + if mcid == cid.Undef { + return ctx.Trigger(storagemarket.ClientEventFundingComplete) + } + // Otherwise wait for funds to be added + return ctx.Trigger(storagemarket.ClientEventFundingInitiated, mcid) +} + +// WaitForFunding waits for an AddFunds message to appear on the chain +func WaitForFunding(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + node := environment.Node() + + return node.WaitForMessage(ctx.Context(), *deal.AddFundsCid, func(code exitcode.ExitCode, bytes []byte, finalCid 
cid.Cid, err error) error { + if err != nil { + return ctx.Trigger(storagemarket.ClientEventReserveFundsFailed, xerrors.Errorf("AddFunds err: %w", err)) + } + if code != exitcode.Ok { + return ctx.Trigger(storagemarket.ClientEventReserveFundsFailed, xerrors.Errorf("AddFunds exit code: %s", code.String())) + } + return ctx.Trigger(storagemarket.ClientEventFundingComplete) + + }) +} + +// ProposeDeal sends the deal proposal to the provider +func ProposeDeal(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + proposal := network.Proposal{ + DealProposal: &deal.ClientDealProposal, + Piece: deal.DataRef, + FastRetrieval: deal.FastRetrieval, + } + + s, err := environment.NewDealStream(ctx.Context(), deal.Miner) + if err != nil { + return ctx.Trigger(storagemarket.ClientEventWriteProposalFailed, err) + } + + environment.TagPeer(deal.Miner, deal.ProposalCid.String()) + + if err := s.WriteDealProposal(proposal); err != nil { + return ctx.Trigger(storagemarket.ClientEventWriteProposalFailed, err) + } + + resp, origBytes, err := s.ReadDealResponse() + if err != nil { + return ctx.Trigger(storagemarket.ClientEventReadResponseFailed, err) + } + + err = s.Close() + if err != nil { + return ctx.Trigger(storagemarket.ClientEventStreamCloseError, err) + } + + tok, _, err := environment.Node().GetChainHead(ctx.Context()) + if err != nil { + return ctx.Trigger(storagemarket.ClientEventResponseVerificationFailed) + } + + verified, err := environment.Node().VerifySignature(ctx.Context(), *resp.Signature, deal.MinerWorker, origBytes, tok) + if err != nil { + return ctx.Trigger(storagemarket.ClientEventResponseVerificationFailed) + } + + if !verified { + return ctx.Trigger(storagemarket.ClientEventResponseVerificationFailed) + } + + if resp.Response.State != storagemarket.StorageDealWaitingForData { + return ctx.Trigger(storagemarket.ClientEventUnexpectedDealState, resp.Response.State, resp.Response.Message) + } + + return 
ctx.Trigger(storagemarket.ClientEventInitiateDataTransfer) +} + +// RestartDataTransfer restarts a data transfer to the provider that was initiated earlier +func RestartDataTransfer(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + log.Infof("restarting data transfer for deal %s", deal.ProposalCid) + + if deal.TransferChannelID == nil { + return ctx.Trigger(storagemarket.ClientEventDataTransferRestartFailed, xerrors.New("channelId on client deal is nil")) + } + + // restart the push data transfer. This will complete asynchronously and the + // completion of the data transfer will trigger a change in deal state + err := environment.RestartDataTransfer(ctx.Context(), + *deal.TransferChannelID, + ) + if err != nil { + return ctx.Trigger(storagemarket.ClientEventDataTransferRestartFailed, err) + } + + return nil +} + +// InitiateDataTransfer initiates data transfer to the provider +func InitiateDataTransfer(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + if deal.DataRef.TransferType == storagemarket.TTManual { + log.Infof("manual data transfer for deal %s", deal.ProposalCid) + return ctx.Trigger(storagemarket.ClientEventDataTransferComplete) + } + + log.Infof("sending data for a deal %s", deal.ProposalCid) + + // initiate a push data transfer. 
This will complete asynchronously and the + completion of the data transfer will trigger a change in deal state + _, err := environment.StartDataTransfer(ctx.Context(), + deal.Miner, + &requestvalidation.StorageDataTransferVoucher{Proposal: deal.ProposalCid}, + deal.DataRef.Root, + selectorparse.CommonSelector_ExploreAllRecursively, + ) + + if err != nil { + return ctx.Trigger(storagemarket.ClientEventDataTransferFailed, xerrors.Errorf("failed to open push data channel: %w", err)) + } + + return nil +} + +// CheckForDealAcceptance is run until the deal is sealed and published by the provider, or errors +func CheckForDealAcceptance(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + _, currEpoch, err := environment.Node().GetChainHead(ctx.Context()) + + if err == nil { + if currEpoch > deal.Proposal.StartEpoch+MaxGraceEpochsForDealAcceptance { + return ctx.Trigger(storagemarket.ClientEventDealRejected, deal.State, "start epoch already elapsed") + } + } + + dealState, err := environment.GetProviderDealState(ctx.Context(), deal.ProposalCid) + if err != nil { + log.Warnf("error when querying provider deal state: %s", err) // TODO: at what point do we fail the deal?
+ return waitAgain(ctx, environment, true, storagemarket.StorageDealUnknown) + } + + if isFailed(dealState.State) { + return ctx.Trigger(storagemarket.ClientEventDealRejected, dealState.State, dealState.Message) + } + + if isAccepted(dealState.State) { + if *dealState.ProposalCid != deal.ProposalCid { + return ctx.Trigger(storagemarket.ClientEventResponseDealDidNotMatch, *dealState.ProposalCid, deal.ProposalCid) + } + + return ctx.Trigger(storagemarket.ClientEventDealAccepted, dealState.PublishCid) + } + + return waitAgain(ctx, environment, false, dealState.State) +} + +func waitAgain(ctx fsm.Context, environment ClientDealEnvironment, pollError bool, providerState storagemarket.StorageDealStatus) error { + t := time.NewTimer(environment.PollingInterval()) + + go func() { + select { + case <-t.C: + _ = ctx.Trigger(storagemarket.ClientEventWaitForDealState, pollError, providerState) + case <-ctx.Context().Done(): + t.Stop() + return + } + }() + + return nil +} + +// ValidateDealPublished confirms with the chain that a deal was published +func ValidateDealPublished(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + + dealID, err := environment.Node().ValidatePublishedDeal(ctx.Context(), deal) + if err != nil { + return ctx.Trigger(storagemarket.ClientEventDealPublishFailed, err) + } + + releaseReservedFunds(ctx, environment, deal) + + // at this point data transfer is complete, so unprotect peer connection + environment.UntagPeer(deal.Miner, deal.ProposalCid.String()) + + return ctx.Trigger(storagemarket.ClientEventDealPublished, dealID) +} + +// VerifyDealPreCommitted verifies that a deal has been pre-committed +func VerifyDealPreCommitted(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) { + // It's possible that + // - we miss the pre-commit message and have to wait for prove-commit + // - the deal is already 
active (for example if the node is restarted + // while waiting for pre-commit) + // In either of these two cases, isActive will be true. + switch { + case err != nil: + _ = ctx.Trigger(storagemarket.ClientEventDealPrecommitFailed, err) + case isActive: + _ = ctx.Trigger(storagemarket.ClientEventDealActivated) + default: + _ = ctx.Trigger(storagemarket.ClientEventDealPrecommitted, sectorNumber) + } + } + + err := environment.Node().OnDealSectorPreCommitted(ctx.Context(), deal.Proposal.Provider, deal.DealID, deal.Proposal, deal.PublishMessage, cb) + + if err != nil { + return ctx.Trigger(storagemarket.ClientEventDealPrecommitFailed, err) + } + return nil +} + +// VerifyDealActivated confirms that a deal was successfully committed to a sector and is active +func VerifyDealActivated(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + cb := func(err error) { + if err != nil { + _ = ctx.Trigger(storagemarket.ClientEventDealActivationFailed, err) + } else { + + _ = ctx.Trigger(storagemarket.ClientEventDealActivated) + } + } + + if err := environment.Node().OnDealSectorCommitted(ctx.Context(), deal.Proposal.Provider, deal.DealID, deal.SectorNumber, deal.Proposal, deal.PublishMessage, cb); err != nil { + return ctx.Trigger(storagemarket.ClientEventDealActivationFailed, err) + } + + return nil +} + +// WaitForDealCompletion waits for the deal to be slashed or to expire +func WaitForDealCompletion(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { + node := environment.Node() + + // deal is now active, clean up the blockstore. 
+	if err := environment.CleanBlockstore(deal.DataRef.Root); err != nil {
+		log.Errorf("storage deal active but failed to cleanup read-only blockstore, proposalCid=%s, err=%s", deal.ProposalCid, err)
+	}
+
+	// Called when the deal expires
+	expiredCb := func(err error) {
+		if err != nil {
+			_ = ctx.Trigger(storagemarket.ClientEventDealCompletionFailed, xerrors.Errorf("deal expiration err: %w", err))
+		} else {
+			_ = ctx.Trigger(storagemarket.ClientEventDealExpired)
+		}
+	}
+
+	// Called when the deal is slashed
+	slashedCb := func(slashEpoch abi.ChainEpoch, err error) {
+		if err != nil {
+			_ = ctx.Trigger(storagemarket.ClientEventDealCompletionFailed, xerrors.Errorf("deal slashing err: %w", err))
+		} else {
+			_ = ctx.Trigger(storagemarket.ClientEventDealSlashed, slashEpoch)
+		}
+	}
+
+	if err := node.OnDealExpiredOrSlashed(ctx.Context(), deal.DealID, expiredCb, slashedCb); err != nil {
+		return ctx.Trigger(storagemarket.ClientEventDealCompletionFailed, err)
+	}
+
+	return nil
+}
+
+// FailDeal cleans up a failing deal
+func FailDeal(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error {
+	releaseReservedFunds(ctx, environment, deal)
+
+	// TODO: store in some sort of audit log
+	log.Errorf("deal %s failed: %s", deal.ProposalCid, deal.Message)
+
+	environment.UntagPeer(deal.Miner, deal.ProposalCid.String())
+
+	if err := environment.CleanBlockstore(deal.DataRef.Root); err != nil {
+		log.Errorf("failed to cleanup read-only blockstore, proposalCid=%s: %s", deal.ProposalCid, err)
+	}
+
+	return ctx.Trigger(storagemarket.ClientEventFailed)
+}
+
+func releaseReservedFunds(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) {
+	if !deal.FundsReserved.Nil() && !deal.FundsReserved.IsZero() {
+		err := environment.Node().ReleaseFunds(ctx.Context(), deal.Proposal.Client, deal.FundsReserved)
+		if err != nil {
+			// nonfatal error
+			log.Warnf("failed to release funds: %s", err)
+		}
+		_ = 
ctx.Trigger(storagemarket.ClientEventFundsReleased, deal.FundsReserved) + } +} + +func isAccepted(status storagemarket.StorageDealStatus) bool { + return status == storagemarket.StorageDealStaged || + status == storagemarket.StorageDealStagedOfSxx || + status == storagemarket.StorageDealAwaitingPreCommit || + status == storagemarket.StorageDealSealing || + status == storagemarket.StorageDealActive || + status == storagemarket.StorageDealExpired || + status == storagemarket.StorageDealSlashed +} + +func isFailed(status storagemarket.StorageDealStatus) bool { + return status == storagemarket.StorageDealFailing || + status == storagemarket.StorageDealError +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_states_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_states_test.go new file mode 100644 index 00000000000..3e21b283b5a --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/client_states_test.go @@ -0,0 +1,835 @@ +package clientstates_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/assert" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-statemachine/fsm" + fsmtest "github.com/filecoin-project/go-statemachine/fsm/testutil" + + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates" + smnet 
"github.com/filecoin-project/go-fil-markets/storagemarket/network" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" +) + +var clientDealProposal = tut.MakeTestClientDealProposal() + +func TestReserveClientFunds(t *testing.T) { + t.Run("immediately succeeds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealReserveClientFunds, clientstates.ReserveClientFunds, testCase{ + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFundsReserved, deal.State) + assert.Equal(t, env.node.DealFunds.ReserveCalls[0], deal.Proposal.ClientBalanceRequirement()) + assert.Len(t, env.node.DealFunds.ReleaseCalls, 0) + assert.Equal(t, deal.Proposal.ClientBalanceRequirement(), deal.FundsReserved) + }, + }) + }) + t.Run("succeeds by sending an AddFunds message", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealReserveClientFunds, clientstates.ReserveClientFunds, testCase{ + nodeParams: nodeParams{AddFundsCid: tut.GenerateCids(1)[0]}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealClientFunding, deal.State) + assert.Equal(t, env.node.DealFunds.ReserveCalls[0], deal.Proposal.ClientBalanceRequirement()) + assert.Len(t, env.node.DealFunds.ReleaseCalls, 0) + assert.Equal(t, deal.Proposal.ClientBalanceRequirement(), deal.FundsReserved) + }, + }) + }) + t.Run("Reserve fails", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealReserveClientFunds, clientstates.ReserveClientFunds, testCase{ + nodeParams: nodeParams{ + ReserveFundsError: errors.New("Something went wrong"), + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + assert.Equal(t, "adding market funds failed: Something went wrong", deal.Message) + assert.Len(t, env.node.DealFunds.ReserveCalls, 0) + assert.Len(t, 
env.node.DealFunds.ReleaseCalls, 0) + assert.True(t, deal.FundsReserved.Nil()) + }, + }) + }) +} + +func TestWaitForFunding(t *testing.T) { + t.Run("succeeds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealClientFunding, clientstates.WaitForFunding, testCase{ + nodeParams: nodeParams{WaitForMessageExitCode: exitcode.Ok}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFundsReserved, deal.State) + }, + }) + }) + t.Run("ReserveFunds fails", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealClientFunding, clientstates.WaitForFunding, testCase{ + nodeParams: nodeParams{WaitForMessageExitCode: exitcode.ErrInsufficientFunds}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + assert.Equal(t, "adding market funds failed: AddFunds exit code: 19", deal.Message) + }, + }) + }) +} + +func TestProposeDeal(t *testing.T) { + t.Run("succeeds, closes stream, and tags connection", func(t *testing.T) { + ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{ + ResponseReader: testResponseReader(t, responseParams{ + state: storagemarket.StorageDealWaitingForData, + proposal: clientDealProposal, + }), + }) + runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{ + envParams: envParams{dealStream: ds}, + nodeParams: nodeParams{WaitForMessageExitCode: exitcode.Ok}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealStartDataTransfer, deal.State) + assert.Equal(t, 1, env.dealStream.CloseCount) + assert.Len(t, env.peerTagger.TagCalls, 1) + assert.Equal(t, deal.Miner, env.peerTagger.TagCalls[0]) + }, + }) + }) + t.Run("sends a fast retrieval flag", func(t *testing.T) { + var sentProposal *smnet.Proposal + + ds := 
tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{
+			ResponseReader: testResponseReader(t, responseParams{
+				state:    storagemarket.StorageDealWaitingForData,
+				proposal: clientDealProposal,
+			}),
+			ProposalWriter: func(proposal smnet.Proposal) error {
+				sentProposal = &proposal
+				return nil
+			},
+		})
+
+		runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{
+			envParams:   envParams{dealStream: ds},
+			stateParams: dealStateParams{fastRetrieval: true},
+			inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) {
+				tut.AssertDealState(t, storagemarket.StorageDealStartDataTransfer, deal.State)
+				assert.Equal(t, true, sentProposal.FastRetrieval)
+			},
+		})
+	})
+	t.Run("write proposal fails", func(t *testing.T) {
+		ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{
+			ProposalWriter: tut.FailStorageProposalWriter,
+		})
+		runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{
+			envParams:  envParams{dealStream: ds},
+			nodeParams: nodeParams{WaitForMessageExitCode: exitcode.Ok},
+			inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) {
+				tut.AssertDealState(t, storagemarket.StorageDealError, deal.State)
+				assert.Equal(t, "sending proposal to storage provider failed: write proposal failed", deal.Message)
+			},
+		})
+	})
+	t.Run("read response fails", func(t *testing.T) {
+		ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{
+			ResponseReader: tut.FailStorageResponseReader,
+		})
+		runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{
+			envParams: envParams{
+				dealStream: ds,
+			},
+			inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) {
+				tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State)
+				assert.Equal(t, "error reading Response message from provider: read response failed", deal.Message)
+			},
+		})
+	})
+	t.Run("closing the stream fails", func(t *testing.T) {
+ ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{}) + ds.CloseError = xerrors.Errorf("failed to close stream") + runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{ + envParams: envParams{dealStream: ds}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error attempting to close stream: failed to close stream", deal.Message) + }, + }) + }) + t.Run("getting chain head fails", func(t *testing.T) { + ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{}) + runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{ + envParams: envParams{ + dealStream: ds, + }, + nodeParams: nodeParams{ + GetChainHeadError: xerrors.Errorf("failed getting chain head"), + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + assert.Equal(t, "unable to verify signature on deal response", deal.Message) + }, + }) + }) + t.Run("verify signature fails", func(t *testing.T) { + ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{}) + runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{ + envParams: envParams{ + dealStream: ds, + }, + nodeParams: nodeParams{ + VerifySignatureFails: true, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + assert.Equal(t, "unable to verify signature on deal response", deal.Message) + }, + }) + }) + t.Run("response contains unexpected state", func(t *testing.T) { + ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{ + ResponseReader: testResponseReader(t, responseParams{ + proposal: clientDealProposal, + state: storagemarket.StorageDealProposalNotFound, + message: "couldn't find deal in 
store", + }), + }) + runAndInspect(t, storagemarket.StorageDealFundsReserved, clientstates.ProposeDeal, testCase{ + envParams: envParams{ + dealStream: ds, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + assert.Equal(t, "unexpected deal status while waiting for data request: 1 (StorageDealProposalNotFound). Provider message: couldn't find deal in store", deal.Message) + }, + }) + }) +} + +func TestInitiateDataTransfer(t *testing.T) { + t.Run("succeeds and starts the data transfer", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealStartDataTransfer, clientstates.InitiateDataTransfer, testCase{ + envParams: envParams{ + dataTransferChannelId: datatransfer.ChannelID{Initiator: peer.ID("1"), Responder: peer.ID("2"), ID: datatransfer.TransferID(1)}, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + assert.Len(t, env.startDataTransferCalls, 1) + assert.Equal(t, env.startDataTransferCalls[0].to, deal.Miner) + assert.Equal(t, env.startDataTransferCalls[0].baseCid, deal.DataRef.Root) + assert.Equal(t, storagemarket.StorageDealStartDataTransfer, deal.State) + }, + }) + }) + + t.Run("starts polling for acceptance with manual transfers", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealStartDataTransfer, clientstates.InitiateDataTransfer, testCase{ + envParams: envParams{ + manualTransfer: true, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, deal.State) + assert.Len(t, env.startDataTransferCalls, 0) + }, + }) + }) + + t.Run("fails if it can't initiate data transfer", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealStartDataTransfer, clientstates.InitiateDataTransfer, testCase{ + envParams: envParams{ + startDataTransferError: xerrors.Errorf("failed to start data transfer"), + }, + inspector: 
func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + }, + }) + }) +} + +func TestRestartDataTransfer(t *testing.T) { + t.Run("fails if can't restart data transfer", func(t *testing.T) { + err := xerrors.New("error") + + runAndInspect(t, storagemarket.StorageDealClientTransferRestart, clientstates.RestartDataTransfer, testCase{ + envParams: envParams{ + restartDataTransferError: err, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + assert.Len(t, env.restartDataTransferCalls, 1) + assert.Equal(t, datatransfer.ChannelID{}, env.restartDataTransferCalls[0].channelId) + assert.Equal(t, xerrors.Errorf("failed to restart data transfer: %w", err).Error(), deal.Message) + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + }, + }) + }) +} + +func TestCheckForDealAcceptance(t *testing.T) { + testCids := tut.GenerateCids(4) + proposalCid := tut.GenerateCid(t, clientDealProposal) + + makeProviderDealState := func(status storagemarket.StorageDealStatus) *storagemarket.ProviderDealState { + return &storagemarket.ProviderDealState{ + State: status, + Message: "", + Proposal: &clientDealProposal.Proposal, + ProposalCid: &proposalCid, + AddFundsCid: &testCids[1], + PublishCid: &testCids[2], + DealID: 123, + } + } + + t.Run("succeeds when provider indicates a successful deal", func(t *testing.T) { + successStates := []storagemarket.StorageDealStatus{ + storagemarket.StorageDealActive, + storagemarket.StorageDealAwaitingPreCommit, + storagemarket.StorageDealSealing, + storagemarket.StorageDealStaged, + storagemarket.StorageDealSlashed, + storagemarket.StorageDealExpired, + } + + for _, s := range successStates { + runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ + envParams: envParams{ + providerDealState: makeProviderDealState(s), + }, + inspector: func(deal storagemarket.ClientDeal, env 
*fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealProposalAccepted, deal.State) + }, + }) + } + }) + + t.Run("fails when provider indicates a failed deal", func(t *testing.T) { + failureStates := []storagemarket.StorageDealStatus{ + storagemarket.StorageDealFailing, + storagemarket.StorageDealError, + } + + for _, s := range failureStates { + runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ + envParams: envParams{ + providerDealState: makeProviderDealState(s), + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + }, + }) + } + }) + + t.Run("continues polling if there is an error querying provider deal state", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ + envParams: envParams{ + getDealStatusErr: xerrors.Errorf("network error"), + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, deal.State) + assert.Equal(t, uint64(1), deal.PollRetryCount) + assert.Equal(t, uint64(1), deal.PollErrorCount) + assert.Equal(t, "Provider state: StorageDealUnknown", deal.Message) + }, + }) + }) + + t.Run("stops polling if (start epoch + grace period) has elapsed", func(t *testing.T) { + startEpoch := abi.ChainEpoch(1) + maxEpoch := startEpoch + clientstates.MaxGraceEpochsForDealAcceptance + + runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ + envParams: envParams{ + providerDealState: makeProviderDealState(storagemarket.StorageDealVerifyData), + }, + nodeParams: nodeParams{ + CurrentEpoch: maxEpoch + 1, + }, + stateParams: dealStateParams{ + startEpoch: startEpoch, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, 
storagemarket.StorageDealFailing, deal.State) + assert.Contains(t, deal.Message, "start epoch already elapsed") + }, + }) + }) + + t.Run("continues polling with an indeterminate deal state", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ + envParams: envParams{ + providerDealState: makeProviderDealState(storagemarket.StorageDealVerifyData), + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, deal.State) + assert.Equal(t, "Provider state: StorageDealVerifyData", deal.Message) + }, + }) + }) + + t.Run("fails if the wrong proposal comes back", func(t *testing.T) { + pds := makeProviderDealState(storagemarket.StorageDealActive) + pds.ProposalCid = &tut.GenerateCids(1)[0] + + runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ + envParams: envParams{providerDealState: pds}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + assert.Regexp(t, "miner responded to a wrong proposal", deal.Message) + }, + }) + }) +} + +func TestValidateDealPublished(t *testing.T) { + t.Run("succeeds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealProposalAccepted, clientstates.ValidateDealPublished, testCase{ + nodeParams: nodeParams{ValidatePublishedDealID: abi.DealID(5)}, + stateParams: dealStateParams{ + reserveFunds: true, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + assert.Equal(t, abi.DealID(5), deal.DealID) + assert.Equal(t, env.node.DealFunds.ReleaseCalls[0], deal.Proposal.ClientBalanceRequirement()) + assert.True(t, deal.FundsReserved.Nil() || deal.FundsReserved.IsZero()) + assert.Len(t, env.peerTagger.UntagCalls, 1) + 
assert.Equal(t, deal.Miner, env.peerTagger.UntagCalls[0]) + }, + }) + }) + t.Run("succeeds, funds already released", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealProposalAccepted, clientstates.ValidateDealPublished, testCase{ + nodeParams: nodeParams{ValidatePublishedDealID: abi.DealID(5)}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + assert.Equal(t, abi.DealID(5), deal.DealID) + assert.Len(t, env.node.DealFunds.ReleaseCalls, 0) + assert.Len(t, env.peerTagger.UntagCalls, 1) + assert.Equal(t, deal.Miner, env.peerTagger.UntagCalls[0]) + }, + }) + }) + t.Run("fails", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealProposalAccepted, clientstates.ValidateDealPublished, testCase{ + nodeParams: nodeParams{ + ValidatePublishedDealID: abi.DealID(5), + ValidatePublishedError: errors.New("Something went wrong"), + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error validating deal published: Something went wrong", deal.Message) + }, + }) + }) +} + +func TestVerifyDealPreCommitted(t *testing.T) { + t.Run("succeeds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealAwaitingPreCommit, clientstates.VerifyDealPreCommitted, testCase{ + nodeParams: nodeParams{PreCommittedSectorNumber: abi.SectorNumber(10)}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealSealing, deal.State) + assert.Equal(t, abi.SectorNumber(10), deal.SectorNumber) + }, + }) + }) + t.Run("succeeds, active", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealAwaitingPreCommit, clientstates.VerifyDealPreCommitted, testCase{ + nodeParams: nodeParams{PreCommittedIsActive: true}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + 
tut.AssertDealState(t, storagemarket.StorageDealActive, deal.State) + }, + }) + }) + t.Run("fails synchronously", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealAwaitingPreCommit, clientstates.VerifyDealPreCommitted, testCase{ + nodeParams: nodeParams{DealPreCommittedSyncError: errors.New("Something went wrong")}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error waiting for deal pre-commit message to appear on chain: Something went wrong", deal.Message) + }, + }) + }) + t.Run("fails asynchronously", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealAwaitingPreCommit, clientstates.VerifyDealPreCommitted, testCase{ + nodeParams: nodeParams{DealPreCommittedAsyncError: errors.New("Something went wrong later")}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error waiting for deal pre-commit message to appear on chain: Something went wrong later", deal.Message) + }, + }) + }) +} + +func TestVerifyDealActivated(t *testing.T) { + t.Run("succeeds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealSealing, clientstates.VerifyDealActivated, testCase{ + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealActive, deal.State) + }, + }) + }) + t.Run("fails synchronously", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealSealing, clientstates.VerifyDealActivated, testCase{ + nodeParams: nodeParams{DealCommittedSyncError: errors.New("Something went wrong")}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error in deal activation: Something went wrong", deal.Message) + }, + }) + }) + t.Run("fails 
asynchronously", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealSealing, clientstates.VerifyDealActivated, testCase{ + nodeParams: nodeParams{DealCommittedAsyncError: errors.New("Something went wrong later")}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error in deal activation: Something went wrong later", deal.Message) + }, + }) + }) +} + +func TestWaitForDealCompletion(t *testing.T) { + t.Run("slashing succeeds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ + nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(5)}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealSlashed, deal.State) + assert.Equal(t, abi.ChainEpoch(5), deal.SlashEpoch) + }, + }) + }) + t.Run("expiration succeeds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ + // OnDealSlashedEpoch of zero signals to test node to call onDealExpired() + nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(0)}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealExpired, deal.State) + }, + }) + }) + t.Run("slashing fails", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ + nodeParams: nodeParams{OnDealSlashedError: errors.New("an err")}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error waiting for deal completion: deal slashing err: an err", deal.Message) + }, + }) + }) + t.Run("expiration fails", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealActive, 
clientstates.WaitForDealCompletion, testCase{ + nodeParams: nodeParams{OnDealExpiredError: errors.New("an err")}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error waiting for deal completion: deal expiration err: an err", deal.Message) + }, + }) + }) + t.Run("fails synchronously", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ + nodeParams: nodeParams{WaitForDealCompletionError: errors.New("an err")}, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, "error waiting for deal completion: an err", deal.Message) + }, + }) + }) +} + +func TestFailDeal(t *testing.T) { + t.Run("releases funds", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealFailing, clientstates.FailDeal, testCase{ + stateParams: dealStateParams{ + reserveFunds: true, + }, + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, env.node.DealFunds.ReleaseCalls[0], deal.Proposal.ClientBalanceRequirement()) + assert.True(t, deal.FundsReserved.Nil() || deal.FundsReserved.IsZero()) + }, + }) + }) + t.Run("funds already released", func(t *testing.T) { + runAndInspect(t, storagemarket.StorageDealFailing, clientstates.FailDeal, testCase{ + inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Len(t, env.node.DealFunds.ReleaseCalls, 0) + assert.True(t, deal.FundsReserved.Nil() || deal.FundsReserved.IsZero()) + }, + }) + }) +} + +type envParams struct { + dealStream *tut.TestStorageDealStream + startDataTransferError error + restartDataTransferError error + dataTransferChannelId datatransfer.ChannelID + 
manualTransfer bool + providerDealState *storagemarket.ProviderDealState + getDealStatusErr error + pollingInterval time.Duration +} + +type dealStateParams struct { + addFundsCid *cid.Cid + reserveFunds bool + fastRetrieval bool + startEpoch abi.ChainEpoch +} + +type executor func(t *testing.T, + nodeParams nodeParams, + envParams envParams, + dealInspector func(deal storagemarket.ClientDeal, env *fakeEnvironment)) + +func makeExecutor(ctx context.Context, + eventProcessor fsm.EventProcessor, + initialState storagemarket.StorageDealStatus, + stateEntryFunc clientstates.ClientStateEntryFunc, + dealParams dealStateParams, + clientDealProposal *market.ClientDealProposal) executor { + return func(t *testing.T, + nodeParams nodeParams, + envParams envParams, + dealInspector func(deal storagemarket.ClientDeal, env *fakeEnvironment)) { + node := makeNode(nodeParams) + dealState, err := tut.MakeTestClientDeal(initialState, clientDealProposal, envParams.manualTransfer) + assert.NoError(t, err) + dealState.AddFundsCid = &tut.GenerateCids(1)[0] + dealState.FastRetrieval = dealParams.fastRetrieval + dealState.TransferChannelID = &datatransfer.ChannelID{} + + if dealParams.addFundsCid != nil { + dealState.AddFundsCid = dealParams.addFundsCid + } + if dealParams.reserveFunds { + dealState.FundsReserved = clientDealProposal.Proposal.ClientBalanceRequirement() + } + if dealParams.startEpoch != 0 { + dealState.Proposal.StartEpoch = dealParams.startEpoch + } + + environment := &fakeEnvironment{ + node: node, + dealStream: envParams.dealStream, + startDataTransferError: envParams.startDataTransferError, + startDataTransferChannelId: envParams.dataTransferChannelId, + restartDataTransferError: envParams.restartDataTransferError, + providerDealState: envParams.providerDealState, + getDealStatusErr: envParams.getDealStatusErr, + pollingInterval: envParams.pollingInterval, + peerTagger: tut.NewTestPeerTagger(), + } + + if environment.pollingInterval == 0 { + environment.pollingInterval 
= 0 + } + + fsmCtx := fsmtest.NewTestContext(ctx, eventProcessor) + err = stateEntryFunc(fsmCtx, environment, *dealState) + assert.NoError(t, err) + time.Sleep(10 * time.Millisecond) + fsmCtx.ReplayEvents(t, dealState) + dealInspector(*dealState, environment) + } +} + +type nodeParams struct { + CurrentEpoch abi.ChainEpoch + AddFundsCid cid.Cid + ReserveFundsError error + VerifySignatureFails bool + GetBalanceError error + GetChainHeadError error + WaitForMessageBlocks bool + WaitForMessageError error + WaitForMessageExitCode exitcode.ExitCode + WaitForMessageRetBytes []byte + ClientAddr address.Address + ValidationError error + ValidatePublishedDealID abi.DealID + ValidatePublishedError error + PreCommittedSectorNumber abi.SectorNumber + PreCommittedIsActive bool + DealPreCommittedSyncError error + DealPreCommittedAsyncError error + DealCommittedSyncError error + DealCommittedAsyncError error + WaitForDealCompletionError error + OnDealExpiredError error + OnDealSlashedError error + OnDealSlashedEpoch abi.ChainEpoch +} + +func makeNode(params nodeParams) *testnodes.FakeClientNode { + var out testnodes.FakeClientNode + out.SMState = testnodes.NewStorageMarketState() + if params.CurrentEpoch != 0 { + out.SMState.Epoch = params.CurrentEpoch + } + + out.DealFunds = tut.NewTestDealFunds() + out.AddFundsCid = params.AddFundsCid + out.ReserveFundsError = params.ReserveFundsError + out.VerifySignatureFails = params.VerifySignatureFails + out.GetBalanceError = params.GetBalanceError + out.GetChainHeadError = params.GetChainHeadError + out.WaitForMessageBlocks = params.WaitForMessageBlocks + out.WaitForMessageError = params.WaitForMessageError + out.WaitForMessageExitCode = params.WaitForMessageExitCode + out.WaitForMessageRetBytes = params.WaitForMessageRetBytes + out.ClientAddr = params.ClientAddr + out.ValidationError = params.ValidationError + out.ValidatePublishedDealID = params.ValidatePublishedDealID + out.ValidatePublishedError = params.ValidatePublishedError + 
out.PreCommittedSectorNumber = params.PreCommittedSectorNumber + out.PreCommittedIsActive = params.PreCommittedIsActive + out.DealPreCommittedSyncError = params.DealPreCommittedSyncError + out.DealPreCommittedAsyncError = params.DealPreCommittedAsyncError + out.DealCommittedSyncError = params.DealCommittedSyncError + out.DealCommittedAsyncError = params.DealCommittedAsyncError + out.WaitForDealCompletionError = params.WaitForDealCompletionError + out.OnDealExpiredError = params.OnDealExpiredError + out.OnDealSlashedError = params.OnDealSlashedError + out.OnDealSlashedEpoch = params.OnDealSlashedEpoch + return &out +} + +type fakeEnvironment struct { + node *testnodes.FakeClientNode + dealStream *tut.TestStorageDealStream + + startDataTransferChannelId datatransfer.ChannelID + startDataTransferError error + + startDataTransferCalls []dataTransferParams + + restartDataTransferError error + restartDataTransferCalls []restartDataTransferParams + + providerDealState *storagemarket.ProviderDealState + getDealStatusErr error + pollingInterval time.Duration + peerTagger *tut.TestPeerTagger +} + +type dataTransferParams struct { + to peer.ID + voucher datatransfer.Voucher + baseCid cid.Cid + selector ipld.Node +} + +type restartDataTransferParams struct { + channelId datatransfer.ChannelID +} + +func (fe *fakeEnvironment) StartDataTransfer(_ context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, error) { + fe.startDataTransferCalls = append(fe.startDataTransferCalls, dataTransferParams{ + to: to, + voucher: voucher, + baseCid: baseCid, + selector: selector, + }) + return fe.startDataTransferChannelId, fe.startDataTransferError +} + +func (fe *fakeEnvironment) RestartDataTransfer(_ context.Context, channelId datatransfer.ChannelID) error { + fe.restartDataTransferCalls = append(fe.restartDataTransferCalls, restartDataTransferParams{channelId: channelId}) + + return fe.restartDataTransferError +} + +func (fe 
*fakeEnvironment) Node() storagemarket.StorageClientNode { + return fe.node +} + +func (fe *fakeEnvironment) WriteDealProposal(_ peer.ID, _ cid.Cid, proposal smnet.Proposal) error { + return fe.dealStream.WriteDealProposal(proposal) +} + +func (fe *fakeEnvironment) NewDealStream(_ context.Context, _ peer.ID) (smnet.StorageDealStream, error) { + return fe.dealStream, nil +} + +func (fe *fakeEnvironment) GetProviderDealState(_ context.Context, _ cid.Cid) (*storagemarket.ProviderDealState, error) { + if fe.getDealStatusErr != nil { + return nil, fe.getDealStatusErr + } + return fe.providerDealState, nil +} + +func (fe *fakeEnvironment) PollingInterval() time.Duration { + return fe.pollingInterval +} + +func (fe *fakeEnvironment) TagPeer(id peer.ID, ident string) { + fe.peerTagger.TagPeer(id, ident) +} + +func (fe *fakeEnvironment) UntagPeer(id peer.ID, ident string) { + fe.peerTagger.UntagPeer(id, ident) +} + +func (fe *fakeEnvironment) CleanBlockstore(proposalCid cid.Cid) error { + return nil +} + +var _ clientstates.ClientDealEnvironment = &fakeEnvironment{} + +type responseParams struct { + proposal *market.ClientDealProposal + state storagemarket.StorageDealStatus + message string + publishMessage *cid.Cid + proposalCid cid.Cid +} + +func testResponseReader(t *testing.T, params responseParams) tut.StorageDealResponseReader { + response := smnet.Response{ + State: params.state, + Proposal: params.proposalCid, + Message: params.message, + PublishMessage: params.publishMessage, + } + + if response.Proposal == cid.Undef { + proposalNd, err := cborutil.AsIpld(params.proposal) + assert.NoError(t, err) + response.Proposal = proposalNd.Cid() + } + + return tut.StubbedStorageResponseReader(smnet.SignedResponse{ + Response: response, + Signature: tut.MakeTestSignature(), + }) +} + +type testCase struct { + envParams envParams + nodeParams nodeParams + stateParams dealStateParams + inspector func(deal storagemarket.ClientDeal, env *fakeEnvironment) +} + +func runAndInspect(t 
*testing.T, initialState storagemarket.StorageDealStatus, stateFunc clientstates.ClientStateEntryFunc, tc testCase) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.ClientDeal{}, "State", clientstates.ClientEvents) + assert.NoError(t, err) + executor := makeExecutor(ctx, eventProcessor, initialState, stateFunc, tc.stateParams, clientDealProposal) + executor(t, tc.nodeParams, tc.envParams, tc.inspector) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/doc.go new file mode 100644 index 00000000000..becf2462a3d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientstates/doc.go @@ -0,0 +1,13 @@ +/* +Package clientstates contains state machine logic relating to the `StorageMarket`. + +client_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. + +client_states.go contains state handler functions. + +The following diagram illustrates the operation of the client state machine. 
This diagram is auto-generated from current code and should remain up to date over time: + +https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageclient.mmd.svg + +*/ +package clientstates diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientutils/clientutils.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientutils/clientutils.go new file mode 100644 index 00000000000..a99b4228f32 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientutils/clientutils.go @@ -0,0 +1,112 @@ +// Package clientutils provides utility functions for the storage client & client FSM +package clientutils + +import ( + "context" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipld/go-car" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/multiformats/go-multibase" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-commp-utils/writer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +// CommP calculates the commP for a given dataref +// In Markets, CommP = PieceCid. +// We can't rely on the CARv1 payload in the given CARv2 file being deterministic as the client could have +// written a "non-deterministic/unordered" CARv2 file. +// So, we need to do a CARv1 traversal here by giving the traverser a random access CARv2 blockstore that wraps the given CARv2 file. 
+func CommP(ctx context.Context, bs bstore.Blockstore, data *storagemarket.DataRef, maxTraversalLinks uint64) (cid.Cid, abi.UnpaddedPieceSize, error) { + // if we already have the PieceCid, there's no need to do anything here. + if data.PieceCid != nil { + return *data.PieceCid, data.PieceSize, nil + } + + // It's an error if we don't already have the PieceCid for an offline deal i.e. manual transfer. + if data.TransferType == storagemarket.TTManual { + return cid.Undef, 0, xerrors.New("Piece CID and size must be set for manual transfer") + } + // + // if carPath == "" { + // return cid.Undef, 0, xerrors.New("need Carv2 file path to get a read-only blockstore") + // } + + // // Open a read-only blockstore off the CAR file, wrapped in a filestore so + // // it can read file positional references. + // fs, err := stores.ReadOnlyFilestore(carPath) + // if err != nil { + // return cid.Undef, 0, xerrors.Errorf("failed to open carv2 blockstore: %w", err) + // } + // defer fs.Close() + + // do a CARv1 traversal with the DFS selector. + sc := car.NewSelectiveCar(ctx, bs, []car.Dag{{Root: data.Root, Selector: selectorparse.CommonSelector_ExploreAllRecursively}}, car.MaxTraversalLinks(maxTraversalLinks)) + prepared, err := sc.Prepare() + if err != nil { + return cid.Undef, 0, xerrors.Errorf("failed to prepare CAR: %w", err) + } + + // write out the deterministic CARv1 payload to the CommP writer and calculate the CommP. 
+ commpWriter := &writer.Writer{} + err = prepared.Dump(ctx, commpWriter) + if err != nil { + return cid.Undef, 0, xerrors.Errorf("failed to write CARv1 to commP writer: %w", err) + } + dataCIDSize, err := commpWriter.Sum() + if err != nil { + return cid.Undef, 0, xerrors.Errorf("commpWriter.Sum failed: %w", err) + } + + return dataCIDSize.PieceCID, dataCIDSize.PieceSize.Unpadded(), nil +} + +// VerifyFunc is a function that can validate a signature for a given address and bytes +type VerifyFunc func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) + +// VerifyResponse verifies the signature on the given signed response matches +// the given miner address, using the given signature verification function +func VerifyResponse(ctx context.Context, resp network.SignedResponse, minerAddr address.Address, tok shared.TipSetToken, verifier VerifyFunc) error { + b, err := cborutil.Dump(&resp.Response) + if err != nil { + return err + } + verified, err := verifier(ctx, *resp.Signature, minerAddr, b, tok) + if err != nil { + return err + } + + if !verified { + return xerrors.New("could not verify signature") + } + + return nil +} + +// LabelField makes a label field for a deal proposal as a multibase encoding +// of the payload CID (B58BTC for V0, B64 for V1) +func LabelField(payloadCID cid.Cid) (market.DealLabel, error) { + var cidStr string + var err error + if payloadCID.Version() == 0 { + cidStr, err = payloadCID.StringOfBase(multibase.Base58BTC) + } else { + cidStr, err = payloadCID.StringOfBase(multibase.Base64) + } + if err != nil { + return market.EmptyDealLabel, err + } + + return market.NewLabelFromString(cidStr) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientutils/clientutils_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientutils/clientutils_test.go new file mode 100644 index 00000000000..2757ad32a4f --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/clientutils/clientutils_test.go @@ -0,0 +1,154 @@ +package clientutils_test + +import ( + "context" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "testing" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipld/go-car" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils" + "github.com/filecoin-project/go-fil-markets/stores" +) + +func TestCommP(t *testing.T) { + ctx := context.Background() + + t.Run("when PieceCID is already present on data ref", func(t *testing.T) { + pieceCid := &shared_testutil.GenerateCids(1)[0] + pieceSize := abi.UnpaddedPieceSize(rand.Uint64()) + data := &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + PieceCid: pieceCid, + PieceSize: pieceSize, + } + respcid, ressize, err := clientutils.CommP(ctx, nil, data, 2<<29) + require.NoError(t, err) + require.Equal(t, respcid, *pieceCid) + require.Equal(t, ressize, pieceSize) + }) + + genCommp := func(t *testing.T, ctx context.Context, root cid.Cid, bs bstore.Blockstore) cid.Cid { + data := &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: root, + } + + respcid, _, err := clientutils.CommP(ctx, bs, data, 2<<29) + require.NoError(t, err) + require.NotEqual(t, respcid, cid.Undef) + return respcid + } + + t.Run("when PieceCID needs to be generated", func(t *testing.T) { + file1 := filepath.Join(shared_testutil.ThisDir(t), "../../fixtures/payload.txt") + file2 := filepath.Join(shared_testutil.ThisDir(t), "../../fixtures/payload2.txt") + + var commP [][]cid.Cid + for _, f := range []string{file1, file2} { + rootFull, pathFull := 
shared_testutil.CreateDenseCARv2(t, f) + rootFilestore, pathFilestore := shared_testutil.CreateRefCARv2(t, f) + + // assert the two files have different contents, but the same DAG root. + assertFilesDiffer(t, pathFull, pathFilestore) + require.Equal(t, rootFull, rootFilestore) + + bsFull, err := blockstore.OpenReadOnly(pathFull, blockstore.UseWholeCIDs(true)) + require.NoError(t, err) + t.Cleanup(func() { bsFull.Close() }) + + bsFilestore, err := blockstore.OpenReadOnly(pathFull, blockstore.UseWholeCIDs(true)) + require.NoError(t, err) + t.Cleanup(func() { bsFilestore.Close() }) + + fsFilestore, err := stores.FilestoreOf(bsFilestore) + + // commPs match for both since it's the same unixfs DAG. + commpFull := genCommp(t, ctx, rootFull, bsFull) + commpFilestore := genCommp(t, ctx, rootFilestore, fsFilestore) + require.EqualValues(t, commpFull, commpFilestore) + + commP = append(commP, []cid.Cid{commpFull, commpFilestore}) + } + + // commP's are different across different files/DAGs. + require.NotEqualValues(t, commP[0][0], commP[1][0]) + require.NotEqualValues(t, commP[0][1], commP[1][1]) + }) +} + +func TestLabelField(t *testing.T) { + payloadCID := shared_testutil.GenerateCids(1)[0] + label, err := clientutils.LabelField(payloadCID) + require.NoError(t, err) + labelStr, err := label.ToString() + require.NoError(t, err) + resultCid, err := cid.Decode(labelStr) + require.NoError(t, err) + require.True(t, payloadCID.Equals(resultCid)) +} + +// this test doesn't belong here, it should be a unit test in CARv2, but we can +// retain as a sentinel test. +// TODO maybe remove and trust that CARv2 behaves well. +func TestNoDuplicatesInCARv2(t *testing.T) { + // The CARv2 file for a UnixFS DAG that has duplicates should NOT have duplicates. 
+ file1 := filepath.Join(shared_testutil.ThisDir(t), "../../fixtures/duplicate_blocks.txt") + _, path := shared_testutil.CreateDenseCARv2(t, file1) + require.NotEmpty(t, path) + defer os.Remove(path) + + v2r, err := carv2.OpenReader(path) + require.NoError(t, err) + defer v2r.Close() + v2rDataReader, err := v2r.DataReader() + require.NoError(t, err) + + // Get a reader over the CARv1 payload of the CARv2 file. + cr, err := car.NewCarReader(v2rDataReader) + require.NoError(t, err) + + seen := make(map[cid.Cid]struct{}) + for { + b, err := cr.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + + _, ok := seen[b.Cid()] + require.Falsef(t, ok, "already seen cid %s", b.Cid()) + seen[b.Cid()] = struct{}{} + } +} + +func assertFilesDiffer(t *testing.T, f1Path string, f2Path string) { + f1, err := os.Open(f1Path) + require.NoError(t, err) + defer f1.Close() + + f2, err := os.Open(f2Path) + require.NoError(t, err) + defer f2.Close() + + bzf1, err := ioutil.ReadAll(f1) + require.NoError(t, err) + + bzf2, err := ioutil.ReadAll(f2) + require.NoError(t, err) + + require.NotEqualValues(t, bzf1, bzf2) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/connmanager/connmanager.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/connmanager/connmanager.go new file mode 100644 index 00000000000..1bd9b67484f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/connmanager/connmanager.go @@ -0,0 +1,63 @@ +// Package connmanager tracks open connections maping storage proposal CID -> StorageDealStream +package connmanager + +import ( + "sync" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +// ConnManager is a simple threadsafe map of proposal CID -> network deal stream +type ConnManager struct { + connsLk sync.RWMutex + conns map[cid.Cid]network.StorageDealStream +} + +// NewConnManager returns a new conn manager +func NewConnManager() 
*ConnManager { + return &ConnManager{ + conns: map[cid.Cid]network.StorageDealStream{}, + } +} + +// DealStream returns the deal stream for the given proposal, or an error if not present +func (c *ConnManager) DealStream(proposalCid cid.Cid) (network.StorageDealStream, error) { + c.connsLk.RLock() + s, ok := c.conns[proposalCid] + c.connsLk.RUnlock() + if ok { + return s, nil + } + return nil, xerrors.New("no connection to provider") +} + +// AddStream adds the given stream to the conn manager, and errors if one already +// exists for the given proposal CID +func (c *ConnManager) AddStream(proposalCid cid.Cid, s network.StorageDealStream) error { + c.connsLk.Lock() + defer c.connsLk.Unlock() + _, ok := c.conns[proposalCid] + if ok { + return xerrors.Errorf("already have connected for proposal %s", proposalCid) + } + c.conns[proposalCid] = s + return nil +} + +// Disconnect removes the given connection from the conn manager and closes +// the stream. It errors if an error occurs closing the stream +func (c *ConnManager) Disconnect(proposalCid cid.Cid) error { + c.connsLk.Lock() + defer c.connsLk.Unlock() + s, ok := c.conns[proposalCid] + if !ok { + return nil + } + + err := s.Close() + delete(c.conns, proposalCid) + return err +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/connmanager/connmanager_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/connmanager/connmanager_test.go new file mode 100644 index 00000000000..36a6a985971 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/connmanager/connmanager_test.go @@ -0,0 +1,103 @@ +package connmanager_test + +import ( + "sync" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +func TestConnManager(t 
*testing.T) { + conns := connmanager.NewConnManager() + cids := shared_testutil.GenerateCids(10) + streams := make([]network.StorageDealStream, 0, 10) + var wait sync.WaitGroup + + for i := 0; i < 10; i++ { + streams = append(streams, shared_testutil.NewTestStorageDealStream( + shared_testutil.TestStorageDealStreamParams{})) + } + t.Run("no conns present initially", func(t *testing.T) { + for _, c := range cids { + stream, err := conns.DealStream(c) + require.Nil(t, stream) + require.Error(t, err) + } + }) + + t.Run("adding conns, can retrieve", func(t *testing.T) { + for i, c := range cids { + wait.Add(1) + stream := streams[i] + go func(c cid.Cid, stream network.StorageDealStream) { + defer wait.Done() + err := conns.AddStream(c, stream) + require.NoError(t, err) + }(c, stream) + } + wait.Wait() + for i, c := range cids { + wait.Add(1) + stream := streams[i] + go func(c cid.Cid, stream network.StorageDealStream) { + defer wait.Done() + received, err := conns.DealStream(c) + require.Equal(t, stream, received) + require.NoError(t, err) + }(c, stream) + } + wait.Wait() + }) + + t.Run("adding conns twice fails", func(t *testing.T) { + for i, c := range cids { + wait.Add(1) + stream := streams[i] + go func(c cid.Cid, stream network.StorageDealStream) { + defer wait.Done() + err := conns.AddStream(c, stream) + require.Error(t, err) + }(c, stream) + } + wait.Wait() + }) + + t.Run("disconnection removes", func(t *testing.T) { + for _, c := range cids { + wait.Add(1) + go func(c cid.Cid) { + defer wait.Done() + err := conns.Disconnect(c) + require.NoError(t, err) + }(c) + } + wait.Wait() + for _, c := range cids { + wait.Add(1) + go func(c cid.Cid) { + defer wait.Done() + received, err := conns.DealStream(c) + require.Nil(t, received) + require.Error(t, err) + }(c) + } + wait.Wait() + }) + + t.Run("disconnecting twice causes no error", func(t *testing.T) { + for _, c := range cids { + wait.Add(1) + go func(c cid.Cid) { + defer wait.Done() + err := conns.Disconnect(c) + 
require.NoError(t, err) + }(c) + } + wait.Wait() + }) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/doc.go new file mode 100644 index 00000000000..3f1e24151a5 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/doc.go @@ -0,0 +1,6 @@ +/* +Package storageimpl provides the primary implementation of storage market top level interfaces + +This package provides a production implementation of `StorageClient` and `StorageProvider`. +*/ +package storageimpl diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/dtutils/dtutils.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/dtutils/dtutils.go new file mode 100644 index 00000000000..ed4b0b17c3d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/dtutils/dtutils.go @@ -0,0 +1,161 @@ +// Package dtutils provides event listeners for the client and provider to +// listen for events on the data transfer module and dispatch FSM events based on them +package dtutils + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync/storeutil" + bstore "github.com/ipfs/go-ipfs-blockstore" + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-ipld-prime" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" +) + +var log = logging.Logger("storagemarket_impl") + +// EventReceiver is any thing that can receive FSM events +type EventReceiver interface { + Send(id interface{}, name fsm.EventName, args ...interface{}) (err error) +} + +// ProviderDataTransferSubscriber is the function called when an event occurs in a data +// transfer received by a provider -- it reads the voucher to verify this event occurred +// in a storage market deal, 
then, based on the data transfer event that occurred, it generates +// and update message for the deal -- either moving to staged for a completion +// event or moving to error if a data transfer error occurs +func ProviderDataTransferSubscriber(deals EventReceiver) datatransfer.Subscriber { + return func(event datatransfer.Event, channelState datatransfer.ChannelState) { + voucher, ok := channelState.Voucher().(*requestvalidation.StorageDataTransferVoucher) + // if this event is for a transfer not related to storage, ignore + if !ok { + log.Debugw("ignoring data-transfer event as it's not storage related", "event", datatransfer.Events[event.Code], "channelID", + channelState.ChannelID()) + return + } + + log.Debugw("processing storage provider dt event", "event", datatransfer.Events[event.Code], "proposalCid", voucher.Proposal, "channelID", + channelState.ChannelID(), "channelState", datatransfer.Statuses[channelState.Status()]) + + if channelState.Status() == datatransfer.Completed { + err := deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferCompleted) + if err != nil { + log.Errorf("processing dt event: %s", err) + } + } + + // Translate from data transfer events to provider FSM events + // Note: We ignore data transfer progress events (they do not affect deal state) + err := func() error { + switch event.Code { + case datatransfer.Cancel: + return deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferCancelled) + case datatransfer.Restart: + return deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferRestarted, channelState.ChannelID()) + case datatransfer.Disconnected: + return deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferStalled) + case datatransfer.Open: + return deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferInitiated, channelState.ChannelID()) + case datatransfer.Error: + return deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferFailed, 
fmt.Errorf("deal data transfer failed: %s", event.Message)) + default: + return nil + } + }() + if err != nil { + log.Errorw("error processing storage provider dt event", "event", datatransfer.Events[event.Code], "proposalCid", voucher.Proposal, "channelID", + channelState.ChannelID(), "err", err) + } + } +} + +// ClientDataTransferSubscriber is the function called when an event occurs in a data +// transfer initiated on the client -- it reads the voucher to verify this even occurred +// in a storage market deal, then, based on the data transfer event that occurred, it dispatches +// an event to the appropriate state machine +func ClientDataTransferSubscriber(deals EventReceiver) datatransfer.Subscriber { + return func(event datatransfer.Event, channelState datatransfer.ChannelState) { + voucher, ok := channelState.Voucher().(*requestvalidation.StorageDataTransferVoucher) + // if this event is for a transfer not related to storage, ignore + if !ok { + return + } + + // Note: We ignore data transfer progress events (they do not affect deal state) + log.Debugw("processing storage client dt event", "event", datatransfer.Events[event.Code], "proposalCid", voucher.Proposal, "channelID", + channelState.ChannelID(), "channelState", datatransfer.Statuses[channelState.Status()]) + + if channelState.Status() == datatransfer.Completed { + err := deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferComplete) + if err != nil { + log.Errorf("processing dt event: %s", err) + } + } + + err := func() error { + switch event.Code { + case datatransfer.Cancel: + return deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferCancelled) + case datatransfer.Restart: + return deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferRestarted, channelState.ChannelID()) + case datatransfer.Disconnected: + return deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferStalled) + case datatransfer.TransferRequestQueued: + return 
deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferQueued, channelState.ChannelID()) + case datatransfer.Accept: + return deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferInitiated, channelState.ChannelID()) + case datatransfer.Error: + return deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferFailed, fmt.Errorf("deal data transfer failed: %s", event.Message)) + default: + return nil + } + }() + if err != nil { + log.Errorw("error processing storage client dt event", "event", datatransfer.Events[event.Code], "proposalCid", voucher.Proposal, "channelID", + channelState.ChannelID(), "err", err) + } + } +} + +// StoreGetter retrieves the store for a given proposal cid +type StoreGetter interface { + Get(proposalCid cid.Cid) (bstore.Blockstore, error) +} + +// StoreConfigurableTransport defines the methods needed to +// configure a data transfer transport use a unique store for a given request +type StoreConfigurableTransport interface { + UseStore(datatransfer.ChannelID, ipld.LinkSystem) error +} + +// TransportConfigurer configurers the graphsync transport to use a custom blockstore per deal +func TransportConfigurer(storeGetter StoreGetter) datatransfer.TransportConfigurer { + return func(channelID datatransfer.ChannelID, voucher datatransfer.Voucher, transport datatransfer.Transport) { + storageVoucher, ok := voucher.(*requestvalidation.StorageDataTransferVoucher) + if !ok { + return + } + gsTransport, ok := transport.(StoreConfigurableTransport) + if !ok { + return + } + store, err := storeGetter.Get(storageVoucher.Proposal) + if err != nil { + log.Errorf("attempting to configure data store: %s", err) + return + } + if store == nil { + return + } + err = gsTransport.UseStore(channelID, storeutil.LinkSystemForBlockstore(store)) + if err != nil { + log.Errorf("attempting to configure data store: %s", err) + } + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/dtutils/dtutils_test.go 
b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/dtutils/dtutils_test.go new file mode 100644 index 00000000000..cdd5dacb7d1 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/dtutils/dtutils_test.go @@ -0,0 +1,370 @@ +package dtutils_test + +import ( + "context" + "errors" + "testing" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + bs "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipld/go-ipld-prime" + peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" +) + +func TestProviderDataTransferSubscriber(t *testing.T) { + ps := shared_testutil.GeneratePeers(2) + init := ps[0] + resp := ps[1] + tid := datatransfer.TransferID(1) + expectedProposalCID := shared_testutil.GenerateCids(1)[0] + tests := map[string]struct { + code datatransfer.EventCode + message string + status datatransfer.Status + called bool + voucher datatransfer.Voucher + expectedID interface{} + expectedEvent fsm.EventName + expectedArgs []interface{} + }{ + "not a storage voucher": { + called: false, + voucher: nil, + }, + "open event": { + code: datatransfer.Open, + status: datatransfer.Requested, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ProviderEventDataTransferInitiated, + expectedArgs: []interface{}{datatransfer.ChannelID{Initiator: init, Responder: resp, ID: tid}}, + }, + "restart event": { + code: datatransfer.Restart, + status: datatransfer.Ongoing, + called: true, + voucher: 
&requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ProviderEventDataTransferRestarted, + expectedArgs: []interface{}{datatransfer.ChannelID{Initiator: init, Responder: resp, ID: tid}}, + }, + "disconnected event": { + code: datatransfer.Disconnected, + status: datatransfer.Ongoing, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ProviderEventDataTransferStalled, + }, + "completion status": { + code: datatransfer.Complete, + status: datatransfer.Completed, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ProviderEventDataTransferCompleted, + }, + "data received": { + code: datatransfer.DataReceived, + status: datatransfer.Ongoing, + called: false, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + }, + "error event": { + code: datatransfer.Error, + message: "something went wrong", + status: datatransfer.Failed, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ProviderEventDataTransferFailed, + expectedArgs: []interface{}{errors.New("deal data transfer failed: something went wrong")}, + }, + "other event": { + code: datatransfer.DataSent, + status: datatransfer.Ongoing, + called: false, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + fdg := &fakeDealGroup{} + subscriber := dtutils.ProviderDataTransferSubscriber(fdg) + subscriber(datatransfer.Event{Code: data.code, Message: 
data.message}, shared_testutil.NewTestChannel( + shared_testutil.TestChannelParams{Vouchers: []datatransfer.Voucher{data.voucher}, Status: data.status, + Sender: init, Recipient: resp, TransferID: tid, IsPull: false}, + )) + if data.called { + require.True(t, fdg.called) + require.Equal(t, fdg.lastID, data.expectedID) + require.Equal(t, fdg.lastEvent, data.expectedEvent) + require.Equal(t, fdg.lastArgs, data.expectedArgs) + } else { + require.False(t, fdg.called) + } + }) + } +} + +func TestClientDataTransferSubscriber(t *testing.T) { + ps := shared_testutil.GeneratePeers(2) + init := ps[0] + resp := ps[1] + tid := datatransfer.TransferID(1) + + expectedProposalCID := shared_testutil.GenerateCids(1)[0] + tests := map[string]struct { + code datatransfer.EventCode + message string + status datatransfer.Status + called bool + voucher datatransfer.Voucher + expectedID interface{} + expectedEvent fsm.EventName + expectedArgs []interface{} + }{ + "not a storage voucher": { + called: false, + voucher: nil, + }, + "completion event": { + code: datatransfer.Complete, + status: datatransfer.Completed, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ClientEventDataTransferComplete, + }, + "restart event": { + code: datatransfer.Restart, + status: datatransfer.Ongoing, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ClientEventDataTransferRestarted, + expectedArgs: []interface{}{datatransfer.ChannelID{Initiator: init, Responder: resp, ID: tid}}, + }, + "disconnected event": { + code: datatransfer.Disconnected, + status: datatransfer.Ongoing, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: 
storagemarket.ClientEventDataTransferStalled, + }, + "accept event": { + code: datatransfer.Accept, + status: datatransfer.Requested, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ClientEventDataTransferInitiated, + expectedArgs: []interface{}{datatransfer.ChannelID{Initiator: init, Responder: resp, ID: tid}}, + }, + "error event": { + code: datatransfer.Error, + message: "something went wrong", + status: datatransfer.Failed, + called: true, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + expectedID: expectedProposalCID, + expectedEvent: storagemarket.ClientEventDataTransferFailed, + expectedArgs: []interface{}{errors.New("deal data transfer failed: something went wrong")}, + }, + "other event": { + code: datatransfer.DataReceived, + status: datatransfer.Ongoing, + called: false, + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + }, + } + + for test, data := range tests { + t.Run(test, func(t *testing.T) { + fdg := &fakeDealGroup{} + subscriber := dtutils.ClientDataTransferSubscriber(fdg) + subscriber(datatransfer.Event{Code: data.code, Message: data.message}, shared_testutil.NewTestChannel( + shared_testutil.TestChannelParams{Vouchers: []datatransfer.Voucher{data.voucher}, Status: data.status, + Sender: init, Recipient: resp, TransferID: tid, IsPull: false}, + )) + if data.called { + require.True(t, fdg.called) + require.Equal(t, fdg.lastID, data.expectedID) + require.Equal(t, fdg.lastEvent, data.expectedEvent) + require.Equal(t, fdg.lastArgs, data.expectedArgs) + } else { + require.False(t, fdg.called) + } + }) + } +} + +func TestTransportConfigurer(t *testing.T) { + expectedProposalCID := shared_testutil.GenerateCids(1)[0] + expectedChannelID := shared_testutil.MakeTestChannelID() + + testCases := map[string]struct { + voucher 
datatransfer.Voucher + transport datatransfer.Transport + returnedStore bs.Blockstore + returnedStoreErr error + getterCalled bool + useStoreCalled bool + }{ + "non-storage voucher": { + voucher: nil, + getterCalled: false, + }, + "non-configurable transport": { + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + transport: &fakeTransport{}, + getterCalled: false, + }, + "store getter errors": { + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + transport: &fakeGsTransport{Transport: &fakeTransport{}}, + getterCalled: true, + useStoreCalled: false, + returnedStore: nil, + returnedStoreErr: errors.New("something went wrong"), + }, + "store getter succeeds": { + voucher: &requestvalidation.StorageDataTransferVoucher{ + Proposal: expectedProposalCID, + }, + transport: &fakeGsTransport{Transport: &fakeTransport{}}, + getterCalled: true, + useStoreCalled: true, + returnedStore: bs.NewBlockstore(ds.NewMapDatastore()), + returnedStoreErr: nil, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + storeGetter := &fakeStoreGetter{returnedErr: data.returnedStoreErr, returnedStore: data.returnedStore} + transportConfigurer := dtutils.TransportConfigurer(storeGetter) + transportConfigurer(expectedChannelID, data.voucher, data.transport) + if data.getterCalled { + require.True(t, storeGetter.called) + require.Equal(t, expectedProposalCID, storeGetter.lastProposalCid) + fgt, ok := data.transport.(*fakeGsTransport) + require.True(t, ok) + if data.useStoreCalled { + require.True(t, fgt.called) + require.Equal(t, expectedChannelID, fgt.lastChannelID) + } else { + require.False(t, fgt.called) + } + } else { + require.False(t, storeGetter.called) + } + }) + } +} + +type fakeDealGroup struct { + returnedErr error + called bool + lastID interface{} + lastEvent fsm.EventName + lastArgs []interface{} +} + +func (fdg *fakeDealGroup) Send(id interface{}, name 
fsm.EventName, args ...interface{}) (err error) { + fdg.lastID = id + fdg.lastEvent = name + fdg.lastArgs = args + fdg.called = true + return fdg.returnedErr +} + +type fakeStoreGetter struct { + lastProposalCid cid.Cid + returnedErr error + returnedStore bs.Blockstore + called bool +} + +func (fsg *fakeStoreGetter) Get(proposalCid cid.Cid) (bs.Blockstore, error) { + fsg.lastProposalCid = proposalCid + fsg.called = true + return fsg.returnedStore, fsg.returnedErr +} + +type fakeTransport struct{} + +func (ft *fakeTransport) OpenChannel(ctx context.Context, dataSender peer.ID, channelID datatransfer.ChannelID, root ipld.Link, stor ipld.Node, channel datatransfer.ChannelState, msg datatransfer.Message) error { + return nil +} + +func (ft *fakeTransport) CloseChannel(ctx context.Context, chid datatransfer.ChannelID) error { + return nil +} + +func (ft *fakeTransport) SetEventHandler(events datatransfer.EventsHandler) error { + return nil +} + +func (ft *fakeTransport) CleanupChannel(chid datatransfer.ChannelID) { +} + +func (ft *fakeTransport) Shutdown(context.Context) error { + return nil +} + +type fakeGsTransport struct { + datatransfer.Transport + lastChannelID datatransfer.ChannelID + lastLinkSystem ipld.LinkSystem + called bool +} + +func (fgt *fakeGsTransport) UseStore(channelID datatransfer.ChannelID, lsys ipld.LinkSystem) error { + fgt.lastChannelID = channelID + fgt.lastLinkSystem = lsys + fgt.called = true + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider.go new file mode 100644 index 00000000000..a5d1c64ab07 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider.go @@ -0,0 +1,991 @@ +package storageimpl + +import ( + "context" + "fmt" + "io" + "os" + "sort" + "time" + + "github.com/hannahhoward/go-pubsub" + "github.com/hashicorp/go-multierror" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + 
"golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-commp-utils/ffiwrapper" + datatransfer "github.com/filecoin-project/go-data-transfer" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versionedfsm "github.com/filecoin-project/go-ds-versioning/pkg/fsm" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-padreader" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-statemachine/fsm" + provider "github.com/filecoin-project/index-provider" + "github.com/filecoin-project/index-provider/metadata" + + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" + "github.com/filecoin-project/go-fil-markets/stores" +) + +var _ storagemarket.StorageProvider = &Provider{} +var _ network.StorageReceiver = &Provider{} + +const defaultAwaitRestartTimeout = 1 * time.Hour + +// StoredAsk is an interface which provides access to a StorageAsk +type StoredAsk interface { + GetAsk() *storagemarket.SignedStorageAsk + SetAsk(price 
abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, options ...storagemarket.StorageAskOption) error +} + +type MeshCreator interface { + Connect(context.Context) error +} + +// Provider is the production implementation of the StorageProvider interface +type Provider struct { + net network.StorageMarketNetwork + meshCreator MeshCreator + spn storagemarket.StorageProviderNode + fs filestore.FileStore + pieceStore piecestore.PieceStore + conns *connmanager.ConnManager + storedAsk StoredAsk + actor address.Address + dataTransfer datatransfer.Manager + customDealDeciderFunc DealDeciderFunc + awaitTransferRestartTimeout time.Duration + pubSub *pubsub.PubSub + readyMgr *shared.ReadyManager + + deals fsm.Group + migrateDeals func(context.Context) error + + unsubDataTransfer datatransfer.Unsubscribe + + dagStore stores.DAGStoreWrapper + indexProvider provider.Interface + stores *stores.ReadWriteBlockstores +} + +// StorageProviderOption allows custom configuration of a storage provider +type StorageProviderOption func(p *Provider) + +// DealDeciderFunc is a function which evaluates an incoming deal to decide if +// it its accepted +// It returns: +// - boolean = true if deal accepted, false if rejected +// - string = reason deal was not excepted, if rejected +// - error = if an error occurred trying to decide +type DealDeciderFunc func(context.Context, storagemarket.MinerDeal) (bool, string, error) + +// CustomDealDecisionLogic allows a provider to call custom decision logic when validating incoming +// deal proposals +func CustomDealDecisionLogic(decider DealDeciderFunc) StorageProviderOption { + return func(p *Provider) { + p.customDealDeciderFunc = decider + } +} + +// AwaitTransferRestartTimeout sets the maximum amount of time a provider will +// wait for a client to restart a data transfer when the node starts up before +// failing the deal +func AwaitTransferRestartTimeout(waitTime time.Duration) StorageProviderOption { + return func(p *Provider) 
{ + p.awaitTransferRestartTimeout = waitTime + } +} + +// NewProvider returns a new storage provider +func NewProvider(net network.StorageMarketNetwork, + ds datastore.Batching, + fs filestore.FileStore, + dagStore stores.DAGStoreWrapper, + indexer provider.Interface, + pieceStore piecestore.PieceStore, + dataTransfer datatransfer.Manager, + spn storagemarket.StorageProviderNode, + minerAddress address.Address, + storedAsk StoredAsk, + meshCreator MeshCreator, + options ...StorageProviderOption, +) (storagemarket.StorageProvider, error) { + h := &Provider{ + net: net, + meshCreator: meshCreator, + spn: spn, + fs: fs, + pieceStore: pieceStore, + conns: connmanager.NewConnManager(), + storedAsk: storedAsk, + actor: minerAddress, + dataTransfer: dataTransfer, + pubSub: pubsub.New(providerDispatcher), + readyMgr: shared.NewReadyManager(), + dagStore: dagStore, + stores: stores.NewReadWriteBlockstores(), + awaitTransferRestartTimeout: defaultAwaitRestartTimeout, + indexProvider: indexer, + } + storageMigrations, err := migrations.ProviderMigrations.Build() + if err != nil { + return nil, err + } + h.deals, h.migrateDeals, err = newProviderStateMachine( + ds, + &providerDealEnvironment{h}, + h.dispatch, + storageMigrations, + versioning.VersionKey("2"), + ) + if err != nil { + return nil, err + } + h.Configure(options...) 
+ + // register a data transfer event handler -- this will send events to the state machines based on DT events + h.unsubDataTransfer = dataTransfer.SubscribeToEvents(dtutils.ProviderDataTransferSubscriber(h.deals)) + + pph := &providerPushDeals{h} + err = dataTransfer.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, requestvalidation.NewUnifiedRequestValidator(pph, nil)) + if err != nil { + return nil, err + } + + err = dataTransfer.RegisterTransportConfigurer(&requestvalidation.StorageDataTransferVoucher{}, dtutils.TransportConfigurer(&providerStoreGetter{h})) + if err != nil { + return nil, err + } + + return h, nil +} + +// Start initializes deal processing on a StorageProvider and restarts in progress deals. +// It also registers the provider with a StorageMarketNetwork so it can receive incoming +// messages on the storage market's libp2p protocols +func (p *Provider) Start(ctx context.Context) error { + err := p.net.SetDelegate(p) + if err != nil { + return err + } + go func() { + err := p.start(ctx) + if err != nil { + log.Error(err.Error()) + } + }() + + // connect the index provider node with the full node and protect that connection + if err := p.meshCreator.Connect(ctx); err != nil { + log.Errorf("failed to connect index provider host with the full node: %s", err) + } + + go func() { + for { + select { + case <-time.After(time.Minute): + if err := p.meshCreator.Connect(ctx); err != nil { + log.Errorf("failed to connect index provider host with the full node: %s", err) + } + case <-ctx.Done(): + return + } + } + }() + + return nil +} + +// OnReady registers a listener for when the provider has finished starting up +func (p *Provider) OnReady(ready shared.ReadyFunc) { + p.readyMgr.OnReady(ready) +} + +func (p *Provider) AwaitReady() error { + return p.readyMgr.AwaitReady() +} + +/* +HandleDealStream is called by the network implementation whenever a new message is received on the deal protocol + +It initiates the provider side of the 
deal flow. + +When a provider receives a DealProposal of the deal protocol, it takes the following steps: + +1. Calculates the CID for the received ClientDealProposal. + +2. Constructs a MinerDeal to track the state of this deal. + +3. Tells its statemachine to begin tracking this deal state by CID of the received ClientDealProposal + +4. Tracks the received deal stream by the CID of the ClientDealProposal + +4. Triggers a `ProviderEventOpen` event on its statemachine. + +From then on, the statemachine controls the deal flow in the client. Other components may listen for events in this flow by calling +`SubscribeToEvents` on the Provider. The Provider handles loading the next block to send to the client. + +Documentation of the client state machine can be found at https://godoc.org/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates +*/ +func (p *Provider) HandleDealStream(s network.StorageDealStream) { + log.Info("Handling storage deal proposal!") + + err := p.receiveDeal(s) + if err != nil { + log.Errorf("%+v", err) + s.Close() + return + } +} + +func (p *Provider) receiveDeal(s network.StorageDealStream) error { + proposal, err := s.ReadDealProposal() + if err != nil { + return xerrors.Errorf("failed to read proposal message: %w", err) + } + + if proposal.DealProposal == nil { + return xerrors.Errorf("failed to get deal proposal from proposal message") + } + + proposalNd, err := cborutil.AsIpld(proposal.DealProposal) + if err != nil { + return fmt.Errorf("getting deal proposal as IPLD: %w", err) + } + + // Check if we are already tracking this deal + var md storagemarket.MinerDeal + if err := p.deals.Get(proposalNd.Cid()).Get(&md); err == nil { + // We are already tracking this deal, for some reason it was re-proposed, perhaps because of a client restart + // this is ok, just send a response back. 
+ return p.resendProposalResponse(s, &md) + } + + if proposal.Piece == nil { + return xerrors.Errorf("failed to get proposal piece from proposal message") + } + + var path string + // create an empty CARv2 file at a temp location that Graphysnc will write the incoming blocks to via a CARv2 ReadWrite blockstore wrapper. + if proposal.Piece.TransferType != storagemarket.TTManual { + tmp, err := p.fs.CreateTemp() + if err != nil { + return xerrors.Errorf("failed to create an empty temp CARv2 file: %w", err) + } + if err := tmp.Close(); err != nil { + _ = os.Remove(string(tmp.OsPath())) + return xerrors.Errorf("failed to close temp file: %w", err) + } + path = string(tmp.OsPath()) + } + + deal := &storagemarket.MinerDeal{ + Client: s.RemotePeer(), + Miner: p.net.ID(), + ClientDealProposal: *proposal.DealProposal, + ProposalCid: proposalNd.Cid(), + State: storagemarket.StorageDealUnknown, + Ref: proposal.Piece, + FastRetrieval: proposal.FastRetrieval, + CreationTime: curTime(), + InboundCAR: path, + } + + err = p.deals.Begin(proposalNd.Cid(), deal) + if err != nil { + return err + } + err = p.conns.AddStream(proposalNd.Cid(), s) + if err != nil { + return err + } + return p.deals.Send(proposalNd.Cid(), storagemarket.ProviderEventOpen) +} + +// Stop terminates processing of deals on a StorageProvider +func (p *Provider) Stop() error { + p.readyMgr.Stop() + p.unsubDataTransfer() + err := p.deals.Stop(context.TODO()) + if err != nil { + return err + } + return p.net.StopHandlingRequests() +} + +// add by lin +func (p *Provider) ImportDataForDealOfSxx(ctx context.Context, propCid cid.Cid, fname string, worker string) error { + // TODO: be able to check if we have enough disk space + var d storagemarket.MinerDeal + if err := p.deals.Get(propCid).Get(&d); err != nil { + return xerrors.Errorf("failed getting deal %s: %w", propCid, err) + } + + tempfi, err := os.Open(fname) + if err != nil { + return xerrors.Errorf("failed to open given file: %w", err) + } + + defer 
tempfi.Close() + cleanup := func() { + _ = tempfi.Close() + } + + log.Errorf("zlin ImportDataForDealOfSxx worker: %+v", worker) + + filestat, _ := tempfi.Stat() + carSize := uint64(filestat.Size()) + + _, err = tempfi.Seek(0, io.SeekStart) + if err != nil { + cleanup() + return xerrors.Errorf("failed to seek through temp imported file: %w", err) + } + + if carSizePadded := padreader.PaddedSize(carSize).Padded(); carSizePadded < d.Proposal.PieceSize { + // need to pad up! + proofType, err := p.spn.GetProofType(ctx, p.actor, nil) + if err != nil { + cleanup() + return xerrors.Errorf("failed to determine proof type: %w", err) + } + log.Debugw("fetched proof type", "propCid", propCid) + + pieceCid, err := generatePieceCommitment(proofType, tempfi, carSize) + if err != nil { + cleanup() + return xerrors.Errorf("failed to generate commP: %w", err) + } + log.Debugw("generated pieceCid for imported file", "propCid", propCid) + + rawPaddedCommp, err := commp.PadCommP( + // we know how long a pieceCid "hash" is, just blindly extract the trailing 32 bytes + pieceCid.Hash()[len(pieceCid.Hash())-32:], + uint64(carSizePadded), + uint64(d.Proposal.PieceSize), + ) + if err != nil { + cleanup() + return err + } + pieceCid, _ = commcid.DataCommitmentV1ToCID(rawPaddedCommp) + + if !pieceCid.Equals(d.Proposal.PieceCID) { + cleanup() + return xerrors.Errorf("given data does not match expected commP (got: %s, expected %s)", pieceCid, d.Proposal.PieceCID) + } + } + + log.Debugw("will fire ProviderEventVerifiedDataOfSxx for file", "propCid", propCid) + + return p.deals.Send(propCid, storagemarket.ProviderEventVerifiedDataOfSxx, filestore.Path(fname), filestore.Path(""), worker) +} +// end + +// ImportDataForDeal manually imports data for an offline storage deal +// It will verify that the data in the passed io.Reader matches the expected piece +// cid for the given deal or it will error +func (p *Provider) ImportDataForDeal(ctx context.Context, propCid cid.Cid, data io.Reader) error { + 
// TODO: be able to check if we have enough disk space + var d storagemarket.MinerDeal + if err := p.deals.Get(propCid).Get(&d); err != nil { + return xerrors.Errorf("failed getting deal %s: %w", propCid, err) + } + + + tempfi, err := p.fs.CreateTemp() + if err != nil { + return xerrors.Errorf("failed to create temp file for data import: %w", err) + } + defer tempfi.Close() + cleanup := func() { + _ = tempfi.Close() + _ = p.fs.Delete(tempfi.Path()) + } + + log.Debugw("zlin tempfi.Path: ", tempfi.Path()) + log.Debugw("zlin tempfi.Path: ", tempfi.Path()) + + log.Debugw("will copy imported file to local file", "propCid", propCid) + n, err := io.Copy(tempfi, data) + if err != nil { + cleanup() + return xerrors.Errorf("importing deal data failed: %w", err) + } + log.Debugw("finished copying imported file to local file", "propCid", propCid) + + _ = n // TODO: verify n? + + carSize := uint64(tempfi.Size()) + + _, err = tempfi.Seek(0, io.SeekStart) + if err != nil { + cleanup() + return xerrors.Errorf("failed to seek through temp imported file: %w", err) + } + + proofType, err := p.spn.GetProofType(ctx, p.actor, nil) + if err != nil { + cleanup() + return xerrors.Errorf("failed to determine proof type: %w", err) + } + log.Debugw("fetched proof type", "propCid", propCid) + + pieceCid, err := generatePieceCommitment(proofType, tempfi, carSize) + if err != nil { + cleanup() + return xerrors.Errorf("failed to generate commP: %w", err) + } + log.Debugw("generated pieceCid for imported file", "propCid", propCid) + + if carSizePadded := padreader.PaddedSize(carSize).Padded(); carSizePadded < d.Proposal.PieceSize { + // need to pad up! 
+ rawPaddedCommp, err := commp.PadCommP( + // we know how long a pieceCid "hash" is, just blindly extract the trailing 32 bytes + pieceCid.Hash()[len(pieceCid.Hash())-32:], + uint64(carSizePadded), + uint64(d.Proposal.PieceSize), + ) + if err != nil { + cleanup() + return err + } + pieceCid, _ = commcid.DataCommitmentV1ToCID(rawPaddedCommp) + } + + // Verify CommP matches + if !pieceCid.Equals(d.Proposal.PieceCID) { + cleanup() + return xerrors.Errorf("given data does not match expected commP (got: %s, expected %s)", pieceCid, d.Proposal.PieceCID) + } + + log.Debugw("will fire ProviderEventVerifiedData for imported file", "propCid", propCid) + + return p.deals.Send(propCid, storagemarket.ProviderEventVerifiedData, tempfi.Path(), filestore.Path("")) +} + +func generatePieceCommitment(rt abi.RegisteredSealProof, rd io.Reader, pieceSize uint64) (cid.Cid, error) { + paddedReader, paddedSize := padreader.New(rd, pieceSize) + commitment, err := ffiwrapper.GeneratePieceCIDFromFile(rt, paddedReader, paddedSize) + if err != nil { + return cid.Undef, err + } + return commitment, nil +} + +// GetAsk returns the storage miner's ask, or nil if one does not exist. 
+func (p *Provider) GetAsk() *storagemarket.SignedStorageAsk { + return p.storedAsk.GetAsk() +} + +// AddStorageCollateral adds storage collateral +func (p *Provider) AddStorageCollateral(ctx context.Context, amount abi.TokenAmount) error { + done := make(chan error, 1) + + mcid, err := p.spn.AddFunds(ctx, p.actor, amount) + if err != nil { + return err + } + + err = p.spn.WaitForMessage(ctx, mcid, func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error { + if err != nil { + done <- xerrors.Errorf("AddFunds errored: %w", err) + } else if code != exitcode.Ok { + done <- xerrors.Errorf("AddFunds error, exit code: %s", code.String()) + } else { + done <- nil + } + return nil + }) + + if err != nil { + return err + } + + return <-done +} + +// GetStorageCollateral returns the current collateral balance +func (p *Provider) GetStorageCollateral(ctx context.Context) (storagemarket.Balance, error) { + tok, _, err := p.spn.GetChainHead(ctx) + if err != nil { + return storagemarket.Balance{}, err + } + + return p.spn.GetBalance(ctx, p.actor, tok) +} + +func (p *Provider) RetryDealPublishing(propcid cid.Cid) error { + return p.deals.Send(propcid, storagemarket.ProviderEventRestart) +} + +func (p *Provider) LocalDealCount() (int, error) { + var out []storagemarket.MinerDeal + if err := p.deals.List(&out); err != nil { + return 0, err + } + return len(out), nil +} + +// ListLocalDeals lists deals processed by this storage provider +func (p *Provider) ListLocalDeals() ([]storagemarket.MinerDeal, error) { + var out []storagemarket.MinerDeal + if err := p.deals.List(&out); err != nil { + return nil, err + } + return out, nil +} + +func (p *Provider) GetLocalDeal(propCid cid.Cid) (storagemarket.MinerDeal, error) { + var d storagemarket.MinerDeal + err := p.deals.Get(propCid).Get(&d) + return d, err +} + +func (p *Provider) ListLocalDealsPage(startPropCid *cid.Cid, offset int, limit int) ([]storagemarket.MinerDeal, error) { + if limit == 0 { + return 
[]storagemarket.MinerDeal{}, nil + } + + // Get all deals + var deals []storagemarket.MinerDeal + if err := p.deals.List(&deals); err != nil { + return nil, err + } + + // Sort by creation time descending + sort.Slice(deals, func(i, j int) bool { + return deals[i].CreationTime.Time().After(deals[j].CreationTime.Time()) + }) + + // Iterate through deals until we reach the target signed proposal cid, + // find the offset from there, then add deals from that point up to limit + page := make([]storagemarket.MinerDeal, 0, limit) + startIndex := -1 + if startPropCid == nil { + startIndex = 0 + } + for i, dl := range deals { + // Find the deal with a proposal cid matching startPropCid + if startPropCid != nil && dl.ProposalCid == *startPropCid { + // Start adding deals from offset after the first matching deal + startIndex = i + offset + } + + if startIndex >= 0 && i >= startIndex { + page = append(page, dl) + } + if len(page) == limit { + return page, nil + } + } + + return page, nil +} + +// SetAsk configures the storage miner's ask with the provided price, +// duration, and options. Any previously-existing ask is replaced. +func (p *Provider) SetAsk(price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, options ...storagemarket.StorageAskOption) error { + return p.storedAsk.SetAsk(price, verifiedPrice, duration, options...) 
+} + +// AnnounceDealToIndexer informs indexer nodes that a new deal was received, +// so they can download its index +func (p *Provider) AnnounceDealToIndexer(ctx context.Context, proposalCid cid.Cid) error { + var deal storagemarket.MinerDeal + if err := p.deals.Get(proposalCid).Get(&deal); err != nil { + return xerrors.Errorf("failed getting deal %s: %w", proposalCid, err) + } + + mt := metadata.New(&metadata.GraphsyncFilecoinV1{ + PieceCID: deal.Proposal.PieceCID, + FastRetrieval: deal.FastRetrieval, + VerifiedDeal: deal.Proposal.VerifiedDeal, + }) + + if err := p.meshCreator.Connect(ctx); err != nil { + return fmt.Errorf("cannot publish index record as indexer host failed to connect to the full node: %w", err) + } + + annCid, err := p.indexProvider.NotifyPut(ctx, deal.ProposalCid.Bytes(), mt) + if err == nil { + log.Infow("deal announcement sent to index provider", "advertisementCid", annCid, "shard-key", deal.Proposal.PieceCID, + "proposalCid", deal.ProposalCid) + } + return err +} + +func (p *Provider) AnnounceAllDealsToIndexer(ctx context.Context) error { + inSealingSubsystem := make(map[fsm.StateKey]struct{}, len(providerstates.StatesKnownBySealingSubsystem)) + for _, s := range providerstates.StatesKnownBySealingSubsystem { + inSealingSubsystem[s] = struct{}{} + } + + expiredStates := make(map[fsm.StateKey]struct{}, len(providerstates.ProviderFinalityStates)) + for _, s := range providerstates.ProviderFinalityStates { + expiredStates[s] = struct{}{} + } + + log.Info("will announce all active deals to Indexer") + var out []storagemarket.MinerDeal + if err := p.deals.List(&out); err != nil { + return fmt.Errorf("failed to list deals: %w", err) + } + + shards := make(map[string]struct{}) + var nSuccess int + var merr error + + for _, d := range out { + // only announce deals that have been handed off to the sealing subsystem as the rest will get announced anyways + if _, ok := inSealingSubsystem[d.State]; !ok { + continue + } + // only announce deals that 
have not expired + if _, ok := expiredStates[d.State]; ok { + continue + } + + if err := p.AnnounceDealToIndexer(ctx, d.ProposalCid); err != nil { + merr = multierror.Append(merr, err) + log.Errorw("failed to announce deal to Index provider", "proposalCid", d.ProposalCid, "err", err) + continue + } + shards[d.Proposal.PieceCID.String()] = struct{}{} + nSuccess++ + } + + log.Infow("finished announcing active deals to index provider", "number of deals", nSuccess, "number of shards", shards) + return merr +} + +/* +HandleAskStream is called by the network implementation whenever a new message is received on the ask protocol + +A Provider handling a `AskRequest` does the following: + +1. Reads the current signed storage ask from storage + +2. Wraps the signed ask in an AskResponse and writes it on the StorageAskStream + +The connection is kept open only as long as the request-response exchange. +*/ +func (p *Provider) HandleAskStream(s network.StorageAskStream) { + defer s.Close() + ar, err := s.ReadAskRequest() + if err != nil { + log.Errorf("failed to read AskRequest from incoming stream: %s", err) + return + } + + var ask *storagemarket.SignedStorageAsk + if p.actor != ar.Miner { + log.Warnf("storage provider for address %s receive ask for miner with address %s", p.actor, ar.Miner) + } else { + ask = p.storedAsk.GetAsk() + } + + resp := network.AskResponse{ + Ask: ask, + } + + if err := s.WriteAskResponse(resp, p.sign); err != nil { + log.Errorf("failed to write ask response: %s", err) + return + } +} + +/* +HandleDealStatusStream is called by the network implementation whenever a new message is received on the deal status protocol + +A Provider handling a `DealStatuRequest` does the following: + +1. Lots the deal state from the Provider FSM + +2. Verifies the signature on the DealStatusRequest matches the Client for this deal + +3. Constructs a ProviderDealState from the deal state + +4. Signs the ProviderDealState with its private key + +5. 
Writes a DealStatusResponse with the ProviderDealState and signature onto the DealStatusStream + +The connection is kept open only as long as the request-response exchange. +*/ +func (p *Provider) HandleDealStatusStream(s network.DealStatusStream) { + ctx := context.TODO() + defer s.Close() + request, err := s.ReadDealStatusRequest() + if err != nil { + log.Errorf("failed to read DealStatusRequest from incoming stream: %s", err) + return + } + + dealState, err := p.processDealStatusRequest(ctx, &request) + if err != nil { + log.Errorf("failed to process deal status request: %s", err) + dealState = &storagemarket.ProviderDealState{ + State: storagemarket.StorageDealError, + Message: err.Error(), + } + } + + signature, err := p.sign(ctx, dealState) + if err != nil { + log.Errorf("failed to sign deal status response: %s", err) + return + } + + response := network.DealStatusResponse{ + DealState: *dealState, + Signature: *signature, + } + + if err := s.WriteDealStatusResponse(response, p.sign); err != nil { + log.Warnf("failed to write deal status response: %s", err) + return + } +} + +func (p *Provider) processDealStatusRequest(ctx context.Context, request *network.DealStatusRequest) (*storagemarket.ProviderDealState, error) { + // fetch deal state + var md = storagemarket.MinerDeal{} + if err := p.deals.Get(request.Proposal).Get(&md); err != nil { + log.Errorf("proposal doesn't exist in state store: %s", err) + return nil, xerrors.Errorf("no such proposal") + } + + // verify query signature + buf, err := cborutil.Dump(&request.Proposal) + if err != nil { + log.Errorf("failed to serialize status request: %s", err) + return nil, xerrors.Errorf("internal error") + } + + tok, _, err := p.spn.GetChainHead(ctx) + if err != nil { + log.Errorf("failed to get chain head: %s", err) + return nil, xerrors.Errorf("internal error") + } + + err = providerutils.VerifySignature(ctx, request.Signature, md.ClientDealProposal.Proposal.Client, buf, tok, p.spn.VerifySignature) + if err != 
nil { + log.Errorf("invalid deal status request signature: %s", err) + return nil, xerrors.Errorf("internal error") + } + + return &storagemarket.ProviderDealState{ + State: md.State, + Message: md.Message, + Proposal: &md.Proposal, + ProposalCid: &md.ProposalCid, + AddFundsCid: md.AddFundsCid, + PublishCid: md.PublishCid, + DealID: md.DealID, + FastRetrieval: md.FastRetrieval, + }, nil +} + +// Configure applies the given list of StorageProviderOptions after a StorageProvider +// is initialized +func (p *Provider) Configure(options ...StorageProviderOption) { + for _, option := range options { + option(p) + } +} + +// SubscribeToEvents allows another component to listen for events on the StorageProvider +// in order to track deals as they progress through the deal flow +func (p *Provider) SubscribeToEvents(subscriber storagemarket.ProviderSubscriber) shared.Unsubscribe { + return shared.Unsubscribe(p.pubSub.Subscribe(subscriber)) +} + +// dispatch puts the fsm event into a form that pubSub can consume, +// then publishes the event +func (p *Provider) dispatch(eventName fsm.EventName, deal fsm.StateType) { + evt, ok := eventName.(storagemarket.ProviderEvent) + if !ok { + log.Errorf("dropped bad event %s", eventName) + } + realDeal, ok := deal.(storagemarket.MinerDeal) + if !ok { + log.Errorf("not a MinerDeal %v", deal) + } + pubSubEvt := internalProviderEvent{evt, realDeal} + + log.Debugw("process storage provider listeners", "name", storagemarket.ProviderEvents[evt], "proposal cid", realDeal.ProposalCid) + if err := p.pubSub.Publish(pubSubEvt); err != nil { + log.Errorf("failed to publish event %d", evt) + } +} + +func (p *Provider) start(ctx context.Context) (err error) { + defer func() { + publishErr := p.readyMgr.FireReady(err) + if publishErr != nil { + if err != nil { + log.Warnf("failed to publish storage provider ready event with err %s: %s", err, publishErr) + } else { + log.Warnf("failed to publish storage provider ready event: %s", publishErr) + } + } + 
}() + + // Run datastore and DAG store migrations + deals, err := p.runMigrations(ctx) + if err != nil { + return err + } + + // Fire restart event on all active deals + if err := p.restartDeals(deals); err != nil { + return fmt.Errorf("failed to restart deals: %w", err) + } + + // register indexer provider callback now that everything has booted up. + p.indexProvider.RegisterMultihashLister(func(ctx context.Context, contextID []byte) (provider.MultihashIterator, error) { + proposalCid, err := cid.Cast(contextID) + if err != nil { + return nil, fmt.Errorf("failed to cast context ID to a cid") + } + + var deal storagemarket.MinerDeal + if err := p.deals.Get(proposalCid).Get(&deal); err != nil { + return nil, xerrors.Errorf("failed getting deal %s: %w", proposalCid, err) + } + + ii, err := p.dagStore.GetIterableIndexForPiece(deal.Proposal.PieceCID) + if err != nil { + return nil, fmt.Errorf("failed to get iterable index: %w", err) + } + + mhi, err := provider.CarMultihashIterator(ii) + if err != nil { + return nil, fmt.Errorf("failed to get mhiterator: %w", err) + } + return mhi, nil + }) + + return nil +} + +func (p *Provider) runMigrations(ctx context.Context) ([]storagemarket.MinerDeal, error) { + // Perform datastore migration + err := p.migrateDeals(ctx) + if err != nil { + return nil, fmt.Errorf("migrating storage provider state machines: %w", err) + } + + var deals []storagemarket.MinerDeal + err = p.deals.List(&deals) + if err != nil { + return nil, xerrors.Errorf("failed to fetch deals during startup: %w", err) + } + + // migrate deals to the dagstore if still not migrated. 
+	if ok, err := p.dagStore.MigrateDeals(ctx, deals); err != nil {
+		return nil, fmt.Errorf("failed to migrate deals to DAG store: %w", err)
+	} else if ok {
+		log.Info("dagstore migration completed successfully")
+	} else {
+		log.Info("no dagstore migration necessary")
+	}
+
+	return deals, nil
+}
+
+func (p *Provider) restartDeals(deals []storagemarket.MinerDeal) error {
+	for _, deal := range deals {
+		if p.deals.IsTerminated(deal) {
+			continue
+		}
+
+		err := p.deals.Send(deal.ProposalCid, storagemarket.ProviderEventRestart)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *Provider) sign(ctx context.Context, data interface{}) (*crypto.Signature, error) {
+	tok, _, err := p.spn.GetChainHead(ctx)
+	if err != nil {
+		return nil, xerrors.Errorf("couldn't get chain head: %w", err)
+	}
+
+	return providerutils.SignMinerData(ctx, data, p.actor, tok, p.spn.GetMinerWorkerAddress, p.spn.SignBytes)
+}
+
+func (p *Provider) resendProposalResponse(s network.StorageDealStream, md *storagemarket.MinerDeal) error {
+	resp := &network.Response{State: md.State, Message: md.Message, Proposal: md.ProposalCid}
+	sig, err := p.sign(context.TODO(), resp)
+	if err != nil {
+		return xerrors.Errorf("failed to sign response message: %w", err)
+	}
+
+	err = s.WriteDealResponse(network.SignedResponse{Response: *resp, Signature: sig}, p.sign)
+
+	if closeErr := s.Close(); closeErr != nil {
+		log.Warnf("closing connection: %v", closeErr)
+	}
+
+	return err
+}
+
+func newProviderStateMachine(ds datastore.Batching, env fsm.Environment, notifier fsm.Notifier, storageMigrations versioning.VersionedMigrationList, target versioning.VersionKey) (fsm.Group, func(context.Context) error, error) {
+	return versionedfsm.NewVersionedFSM(ds, fsm.Parameters{
+		Environment:     env,
+		StateType:       storagemarket.MinerDeal{},
+		StateKeyField:   "State",
+		Events:          providerstates.ProviderEvents,
+		StateEntryFuncs: providerstates.ProviderStateEntryFuncs,
+		FinalityStates:
providerstates.ProviderFinalityStates, + Notifier: notifier, + }, storageMigrations, target) +} + +type internalProviderEvent struct { + evt storagemarket.ProviderEvent + deal storagemarket.MinerDeal +} + +func providerDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error { + ie, ok := evt.(internalProviderEvent) + if !ok { + return xerrors.New("wrong type of event") + } + cb, ok := fn.(storagemarket.ProviderSubscriber) + if !ok { + return xerrors.New("wrong type of callback") + } + cb(ie.evt, ie.deal) + return nil +} + +// ProviderFSMParameterSpec is a valid set of parameters for a provider FSM - used in doc generation +var ProviderFSMParameterSpec = fsm.Parameters{ + Environment: &providerDealEnvironment{}, + StateType: storagemarket.MinerDeal{}, + StateKeyField: "State", + Events: providerstates.ProviderEvents, + StateEntryFuncs: providerstates.ProviderStateEntryFuncs, + FinalityStates: providerstates.ProviderFinalityStates, +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_environments.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_environments.go new file mode 100644 index 00000000000..df85b6683bb --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_environments.go @@ -0,0 +1,248 @@ +package storageimpl + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + carv2 "github.com/ipld/go-car/v2" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/index-provider/metadata" + + "github.com/filecoin-project/go-fil-markets/commp" + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/storagemarket" + 
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" + "github.com/filecoin-project/go-fil-markets/stores" +) + +// ------- +// providerDealEnvironment +// ------- + +type providerDealEnvironment struct { + p *Provider +} + +func (p *providerDealEnvironment) RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool) error { + return stores.RegisterShardSync(ctx, p.p.dagStore, pieceCid, carPath, eagerInit) +} + +// AnnounceIndex informs indexer nodes that a new deal was received, +// so they can download its index +func (p *providerDealEnvironment) AnnounceIndex(ctx context.Context, deal storagemarket.MinerDeal) (advertCid cid.Cid, err error) { + mt := metadata.New(&metadata.GraphsyncFilecoinV1{ + PieceCID: deal.Proposal.PieceCID, + FastRetrieval: deal.FastRetrieval, + VerifiedDeal: deal.Proposal.VerifiedDeal, + }) + + // ensure we have a connection with the full node host so that the index provider gossip sub announcements make their + // way to the filecoin bootstrapper network + if err := p.p.meshCreator.Connect(ctx); err != nil { + return cid.Undef, fmt.Errorf("cannot publish index record as indexer host failed to connect to the full node: %w", err) + } + + return p.p.indexProvider.NotifyPut(ctx, deal.ProposalCid.Bytes(), mt) +} + +func (p *providerDealEnvironment) RemoveIndex(ctx context.Context, proposalCid cid.Cid) error { + _, err := p.p.indexProvider.NotifyRemove(ctx, proposalCid.Bytes()) + return err +} + +func (p *providerDealEnvironment) ReadCAR(path string) (*carv2.Reader, error) { + return carv2.OpenReader(path) +} + +func (p *providerDealEnvironment) FinalizeBlockstore(proposalCid cid.Cid) error { + bs, err := p.p.stores.Get(proposalCid.String()) + if err != nil { + if xerrors.Is(err, stores.ErrNotFound) { + // The blockstore has already been cleaned up + return nil + } + return xerrors.Errorf("failed to get read/write blockstore: 
%w", err) + } + + if err := bs.Finalize(); err != nil { + return xerrors.Errorf("failed to finalize read/write blockstore: %w", err) + } + + return nil +} + +func (p *providerDealEnvironment) TerminateBlockstore(proposalCid cid.Cid, path string) error { + // stop tracking it. + if err := p.p.stores.Untrack(proposalCid.String()); err != nil { + log.Warnf("failed to untrack read write blockstore, proposalCid=%s, car_path=%s: %s", proposalCid, path, err) + } + + // delete the backing CARv2 file as it was a temporary file we created for + // this storage deal; the piece has now been handed off, or the deal has failed. + if err := os.Remove(path); err != nil { + log.Warnf("failed to delete carv2 file on termination, car_path=%s: %s", path, err) + } + + return nil +} + +func (p *providerDealEnvironment) Address() address.Address { + return p.p.actor +} + +func (p *providerDealEnvironment) Node() storagemarket.StorageProviderNode { + return p.p.spn +} + +func (p *providerDealEnvironment) Ask() storagemarket.StorageAsk { + sask := p.p.storedAsk.GetAsk() + if sask == nil { + return storagemarket.StorageAskUndefined + } + return *sask.Ask +} + +// GeneratePieceCommitment generates the pieceCid for the CARv1 deal payload in +// the CARv2 file that already exists at the given path. 
+func (p *providerDealEnvironment) GeneratePieceCommitment(proposalCid cid.Cid, carPath string, dealSize abi.PaddedPieceSize) (c cid.Cid, path filestore.Path, finalErr error) { + rd, err := carv2.OpenReader(carPath) + if err != nil { + return cid.Undef, "", xerrors.Errorf("failed to get CARv2 reader, proposalCid=%s, carPath=%s: %w", proposalCid, carPath, err) + } + + defer func() { + if err := rd.Close(); err != nil { + log.Errorf("failed to close CARv2 reader, carPath=%s, err=%s", carPath, err) + + if finalErr == nil { + c = cid.Undef + path = "" + finalErr = xerrors.Errorf("failed to close CARv2 reader, proposalCid=%s, carPath=%s: %w", + proposalCid, carPath, err) + return + } + } + }() + + r, err := rd.DataReader() + if err != nil { + return cid.Undef, "", fmt.Errorf("failed to get data reader over CAR file, proposalCid=%s, carPath=%s: %w", proposalCid, carPath, err) + } + + pieceCID, err := commp.GenerateCommp(r, rd.Header.DataSize, uint64(dealSize)) + return pieceCID, "", err +} + +func (p *providerDealEnvironment) FileStore() filestore.FileStore { + return p.p.fs +} + +func (p *providerDealEnvironment) PieceStore() piecestore.PieceStore { + return p.p.pieceStore +} + +func (p *providerDealEnvironment) SendSignedResponse(ctx context.Context, resp *network.Response) error { + s, err := p.p.conns.DealStream(resp.Proposal) + if err != nil { + return xerrors.Errorf("couldn't send response: %w", err) + } + + sig, err := p.p.sign(ctx, resp) + if err != nil { + return xerrors.Errorf("failed to sign response message: %w", err) + } + + signedResponse := network.SignedResponse{ + Response: *resp, + Signature: sig, + } + + err = s.WriteDealResponse(signedResponse, p.p.sign) + if err != nil { + // Assume client disconnected + _ = p.p.conns.Disconnect(resp.Proposal) + } + return err +} + +func (p *providerDealEnvironment) Disconnect(proposalCid cid.Cid) error { + return p.p.conns.Disconnect(proposalCid) +} + +func (p *providerDealEnvironment) RunCustomDecisionLogic(ctx 
context.Context, deal storagemarket.MinerDeal) (bool, string, error) {
+	if p.p.customDealDeciderFunc == nil {
+		return true, "", nil
+	}
+	return p.p.customDealDeciderFunc(ctx, deal)
+}
+
+func (p *providerDealEnvironment) TagPeer(id peer.ID, s string) {
+	p.p.net.TagPeer(id, s)
+}
+
+func (p *providerDealEnvironment) UntagPeer(id peer.ID, s string) {
+	p.p.net.UntagPeer(id, s)
+}
+
+func (p *providerDealEnvironment) AwaitRestartTimeout() <-chan time.Time {
+	timer := time.NewTimer(p.p.awaitTransferRestartTimeout)
+	return timer.C
+}
+
+var _ providerstates.ProviderDealEnvironment = &providerDealEnvironment{}
+
+type providerStoreGetter struct {
+	p *Provider
+}
+
+func (psg *providerStoreGetter) Get(proposalCid cid.Cid) (bstore.Blockstore, error) {
+	// Wait for the provider to be ready
+	err := awaitProviderReady(psg.p)
+	if err != nil {
+		return nil, err
+	}
+
+	var deal storagemarket.MinerDeal
+	err = psg.p.deals.Get(proposalCid).Get(&deal)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to get deal state: %w", err)
+	}
+
+	return psg.p.stores.GetOrOpen(proposalCid.String(), deal.InboundCAR, deal.Ref.Root)
+}
+
+type providerPushDeals struct {
+	p *Provider
+}
+
+func (ppd *providerPushDeals) Get(proposalCid cid.Cid) (storagemarket.MinerDeal, error) {
+	// Wait for the provider to be ready
+	var deal storagemarket.MinerDeal
+	err := awaitProviderReady(ppd.p)
+	if err != nil {
+		return deal, err
+	}
+
+	err = ppd.p.deals.GetSync(context.TODO(), proposalCid, &deal)
+	return deal, err
+}
+
+// awaitProviderReady waits for the provider to startup
+func awaitProviderReady(p *Provider) error {
+	err := p.AwaitReady()
+	if err != nil {
+		return xerrors.Errorf("error waiting for provider startup: %w", err)
+	}
+
+	return nil
+}
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_environments_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_environments_test.go
new file
mode 100644 index 00000000000..184ff8460aa --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_environments_test.go @@ -0,0 +1,51 @@ +package storageimpl + +import ( + "os" + "path/filepath" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" +) + +func TestGeneratePieceCommitment(t *testing.T) { + // both payload.txt and payload2.txt are about 18kb long + pieceSize := abi.PaddedPieceSize(32768) + + _, carV2File1 := shared_testutil.CreateDenseCARv2(t, filepath.Join(shared_testutil.ThisDir(t), "../fixtures/payload.txt")) + defer os.Remove(carV2File1) + _, carV2File2 := shared_testutil.CreateDenseCARv2(t, filepath.Join(shared_testutil.ThisDir(t), "../fixtures/payload2.txt")) + defer os.Remove(carV2File2) + + commP1 := genProviderCommP(t, carV2File1, pieceSize) + commP2 := genProviderCommP(t, carV2File2, pieceSize) + + commP3 := genProviderCommP(t, carV2File1, pieceSize) + commP4 := genProviderCommP(t, carV2File2, pieceSize) + + require.Equal(t, commP1, commP3) + require.Equal(t, commP2, commP4) + + require.NotEqual(t, commP1, commP4) + require.NotEqual(t, commP2, commP3) + + // fails when CARv2 file path isn't a valid one. 
+ env := &providerDealEnvironment{} + pieceCid, _, err := env.GeneratePieceCommitment(cid.Cid{}, "randpath", pieceSize) + require.Error(t, err) + require.Contains(t, err.Error(), "no such file or directory") + require.Equal(t, cid.Undef, pieceCid) +} + +func genProviderCommP(t *testing.T, carv2 string, pieceSize abi.PaddedPieceSize) cid.Cid { + env := &providerDealEnvironment{} + pieceCid, _, err := env.GeneratePieceCommitment(cid.Cid{}, carv2, pieceSize) + require.NoError(t, err) + require.NotEqual(t, pieceCid, cid.Undef) + return pieceCid +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_test.go new file mode 100644 index 00000000000..b03669654e8 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/provider_test.go @@ -0,0 +1,327 @@ +package storageimpl_test + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/stretchr/testify/require" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/exp/rand" + + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + marketOld "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness" + 
"github.com/filecoin-project/go-fil-markets/storagemarket/testharness/dependencies" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" +) + +func TestProvider_Migrations(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + deps := dependencies.NewDependenciesWithTestData(t, ctx, shared_testutil.NewLibp2pTestData(ctx, t), testnodes.NewStorageMarketState(), "", + noOpDelay, noOpDelay) + + providerDs := namespace.Wrap(deps.TestData.Ds1, datastore.NewKey("/deals/provider")) + + numDeals := 5 + dealProposals := make([]*market.ClientDealProposal, numDeals) + proposalCids := make([]cid.Cid, numDeals) + addFundsCids := make([]*cid.Cid, numDeals) + miners := make([]peer.ID, numDeals) + clients := make([]peer.ID, numDeals) + dealIDs := make([]abi.DealID, numDeals) + payloadCids := make([]cid.Cid, numDeals) + messages := make([]string, numDeals) + publishCids := make([]*cid.Cid, numDeals) + fastRetrievals := make([]bool, numDeals) + storeIDs := make([]*uint64, numDeals) + fundsReserveds := make([]abi.TokenAmount, numDeals) + creationTimes := make([]cbg.CborTime, numDeals) + availableForRetrievals := make([]bool, numDeals) + piecePaths := make([]filestore.Path, numDeals) + metadataPaths := make([]filestore.Path, numDeals) + + for i := 0; i < numDeals; i++ { + dealProposals[i] = shared_testutil.MakeTestClientDealProposal() + proposalNd, err := cborutil.AsIpld(dealProposals[i]) + require.NoError(t, err) + proposalCids[i] = proposalNd.Cid() + payloadCids[i] = shared_testutil.GenerateCids(1)[0] + storeID := rand.Uint64() + storeIDs[i] = &storeID + messages[i] = string(shared_testutil.RandomBytes(20)) + fundsReserveds[i] = big.NewInt(rand.Int63()) + fastRetrievals[i] = rand.Intn(2) == 1 + publishMessage := shared_testutil.GenerateCids(1)[0] + publishCids[i] = &publishMessage + addFundsCid := shared_testutil.GenerateCids(1)[0] + addFundsCids[i] = &addFundsCid + dealIDs[i] = 
abi.DealID(rand.Uint64()) + miners[i] = shared_testutil.GeneratePeers(1)[0] + clients[i] = shared_testutil.GeneratePeers(1)[0] + availableForRetrievals[i] = rand.Intn(2) == 1 + piecePaths[i] = filestore.Path(shared_testutil.RandomBytes(20)) + metadataPaths[i] = filestore.Path(shared_testutil.RandomBytes(20)) + now := time.Now() + creationTimes[i] = cbg.CborTime(time.Unix(0, now.UnixNano()).UTC()) + timeBuf := new(bytes.Buffer) + err = creationTimes[i].MarshalCBOR(timeBuf) + require.NoError(t, err) + err = cborutil.ReadCborRPC(timeBuf, &creationTimes[i]) + require.NoError(t, err) + prop, err := oldDealProposal(dealProposals[i]) + require.NoError(t, err) + deal := migrations.MinerDeal0{ + ClientDealProposal: *prop, + ProposalCid: proposalCids[i], + AddFundsCid: addFundsCids[i], + PublishCid: publishCids[i], + Miner: miners[i], + Client: clients[i], + State: storagemarket.StorageDealExpired, + PiecePath: piecePaths[i], + MetadataPath: metadataPaths[i], + SlashEpoch: abi.ChainEpoch(0), + FastRetrieval: fastRetrievals[i], + Message: messages[i], + StoreID: storeIDs[i], + FundsReserved: fundsReserveds[i], + Ref: &migrations.DataRef0{ + TransferType: storagemarket.TTGraphsync, + Root: payloadCids[i], + }, + AvailableForRetrieval: availableForRetrievals[i], + DealID: dealIDs[i], + CreationTime: creationTimes[i], + } + buf := new(bytes.Buffer) + err = deal.MarshalCBOR(buf) + require.NoError(t, err) + err = providerDs.Put(ctx, datastore.NewKey(deal.ProposalCid.String()), buf.Bytes()) + require.NoError(t, err) + } + + pi := shared_testutil.NewMockIndexProvider() + + provider, err := storageimpl.NewProvider( + + network.NewFromLibp2pHost(deps.TestData.Host2, network.RetryParameters(0, 0, 0, 0)), + providerDs, + deps.Fs, + deps.DagStore, + pi, + deps.PieceStore, + deps.DTProvider, + deps.ProviderNode, + deps.ProviderAddr, + deps.StoredAsk, + &testharness.MeshCreatorStub{}, + ) + require.NoError(t, err) + + shared_testutil.StartAndWaitForReady(ctx, t, provider) + deals, err := 
provider.ListLocalDeals() + require.NoError(t, err) + for i := 0; i < numDeals; i++ { + var deal storagemarket.MinerDeal + for _, testDeal := range deals { + if testDeal.Ref.Root.Equals(payloadCids[i]) { + deal = testDeal + break + } + } + expectedDeal := storagemarket.MinerDeal{ + ClientDealProposal: *dealProposals[i], + ProposalCid: proposalCids[i], + AddFundsCid: addFundsCids[i], + PublishCid: publishCids[i], + Miner: miners[i], + Client: clients[i], + State: storagemarket.StorageDealExpired, + PiecePath: piecePaths[i], + MetadataPath: metadataPaths[i], + SlashEpoch: abi.ChainEpoch(0), + FastRetrieval: fastRetrievals[i], + Message: messages[i], + FundsReserved: fundsReserveds[i], + Ref: &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: payloadCids[i], + }, + AvailableForRetrieval: availableForRetrievals[i], + DealID: dealIDs[i], + CreationTime: creationTimes[i], + } + require.Equal(t, expectedDeal, deal) + } + + // Verify get deal by signed proposal cid + deal, err := provider.GetLocalDeal(deals[0].ProposalCid) + require.NoError(t, err) + require.Equal(t, deals[0].ProposalCid, deal.ProposalCid) + + // Verify the deal count + count, err := provider.LocalDealCount() + require.NoError(t, err) + require.Equal(t, len(deals), count) + + // Verify get a page of deals without a nil start proposal cid + listedDeals, err := provider.ListLocalDealsPage(nil, 0, len(deals)) + require.NoError(t, err) + require.Len(t, listedDeals, len(deals)) + for i, dl := range listedDeals { + if i == 0 { + continue + } + // Verify descending order by creation time + require.True(t, dl.CreationTime.Time().Before(listedDeals[i-1].CreationTime.Time())) + } + firstDeal := listedDeals[0] + secondDeal := listedDeals[1] + thirdDeal := listedDeals[2] + + // Verify get a page of deals with a nil start proposal cid and with a limit + listedDeals, err = provider.ListLocalDealsPage(nil, 0, 2) + require.NoError(t, err) + require.Len(t, listedDeals, 2) + // Verify correct deals + 
require.Equal(t, firstDeal.ProposalCid, listedDeals[0].ProposalCid) + require.Equal(t, secondDeal.ProposalCid, listedDeals[1].ProposalCid) + + // Verify get a page of deals with a start proposal cid and with a limit + listedDeals, err = provider.ListLocalDealsPage(&secondDeal.ProposalCid, 0, 2) + require.NoError(t, err) + require.Len(t, listedDeals, 2) + // Verify correct deals + require.Equal(t, secondDeal.ProposalCid, listedDeals[0].ProposalCid) + require.Equal(t, thirdDeal.ProposalCid, listedDeals[1].ProposalCid) + + // Verify get a page of deals with a start proposal cid, and offset and a limit + listedDeals, err = provider.ListLocalDealsPage(&secondDeal.ProposalCid, 1, 1) + require.NoError(t, err) + require.Len(t, listedDeals, 1) + // Verify correct deals + require.Equal(t, thirdDeal.ProposalCid, listedDeals[0].ProposalCid) +} + +func oldDealProposal(p *market.ClientDealProposal) (*marketOld.ClientDealProposal, error) { + label, err := p.Proposal.Label.ToString() + if err != nil { + return nil, err + } + return &marketOld.ClientDealProposal{ + Proposal: marketOld.DealProposal{ + PieceCID: p.Proposal.PieceCID, + PieceSize: p.Proposal.PieceSize, + VerifiedDeal: p.Proposal.VerifiedDeal, + Client: p.Proposal.Client, + Provider: p.Proposal.Provider, + Label: label, + StartEpoch: p.Proposal.StartEpoch, + EndEpoch: p.Proposal.EndEpoch, + StoragePricePerEpoch: p.Proposal.StoragePricePerEpoch, + ProviderCollateral: p.Proposal.ProviderCollateral, + ClientCollateral: p.Proposal.ClientCollateral, + }, + ClientSignature: p.ClientSignature, + }, nil +} + +func TestHandleDealStream(t *testing.T) { + t.Run("handles cases where the proposal is already being tracked", func(t *testing.T) { + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + deps := dependencies.NewDependenciesWithTestData(t, ctx, shared_testutil.NewLibp2pTestData(ctx, t), testnodes.NewStorageMarketState(), "", + noOpDelay, noOpDelay) + var providerDs datastore.Batching 
= namespace.Wrap(deps.TestData.Ds1, datastore.NewKey("/deals/provider")) + namespaced := shared_testutil.DatastoreAtVersion(t, providerDs, "2") + + proposal := shared_testutil.MakeTestClientDealProposal() + proposalNd, err := cborutil.AsIpld(proposal) + require.NoError(t, err) + payloadCid := shared_testutil.GenerateCids(1)[0] + dataRef := &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: payloadCid, + } + + now := time.Now() + creationTime := cbg.CborTime(time.Unix(0, now.UnixNano()).UTC()) + timeBuf := new(bytes.Buffer) + err = creationTime.MarshalCBOR(timeBuf) + require.NoError(t, err) + err = cborutil.ReadCborRPC(timeBuf, &creationTime) + require.NoError(t, err) + deal := storagemarket.MinerDeal{ + ClientDealProposal: *proposal, + ProposalCid: proposalNd.Cid(), + State: storagemarket.StorageDealTransferring, + Ref: dataRef, + } + + // jam a miner state in + buf := new(bytes.Buffer) + err = deal.MarshalCBOR(buf) + require.NoError(t, err) + err = namespaced.Put(ctx, datastore.NewKey(deal.ProposalCid.String()), buf.Bytes()) + require.NoError(t, err) + + pi := shared_testutil.NewMockIndexProvider() + + provider, err := storageimpl.NewProvider( + network.NewFromLibp2pHost(deps.TestData.Host2, network.RetryParameters(0, 0, 0, 0)), + providerDs, + deps.Fs, + deps.DagStore, + pi, + deps.PieceStore, + deps.DTProvider, + deps.ProviderNode, + deps.ProviderAddr, + deps.StoredAsk, + &testharness.MeshCreatorStub{}, + ) + require.NoError(t, err) + + impl := provider.(*storageimpl.Provider) + shared_testutil.StartAndWaitForReady(ctx, t, impl) + + var responseWriteCount int + s := shared_testutil.NewTestStorageDealStream(shared_testutil.TestStorageDealStreamParams{ + ProposalReader: func() (network.Proposal, error) { + return network.Proposal{ + DealProposal: proposal, + Piece: dataRef, + FastRetrieval: false, + }, nil + }, + ResponseWriter: func(response network.SignedResponse, resigningFunc network.ResigningFunc) error { + responseWriteCount += 1 + 
return nil + }, + }) + + // Send a deal proposal for a cid we are already tracking + impl.HandleDealStream(s) + + require.Equal(t, 1, responseWriteCount) + }) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/doc.go new file mode 100644 index 00000000000..96ba495f17f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/doc.go @@ -0,0 +1,13 @@ +/* +Package providerstates contains state machine logic relating to the `StorageProvider`. + +provider_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. + +provider_states.go contains state handler functions. + +The following diagram illustrates the operation of the provider state machine. This diagram is auto-generated from current code and should remain up to date over time: + +https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageprovider.mmd.svg + +*/ +package providerstates diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_fsm.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_fsm.go new file mode 100644 index 00000000000..d1cd09ecd44 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_fsm.go @@ -0,0 +1,323 @@ +package providerstates + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// ProviderEvents are the events that can happen in a storage provider +var ProviderEvents = fsm.Events{ + 
fsm.Event(storagemarket.ProviderEventOpen).From(storagemarket.StorageDealUnknown).To(storagemarket.StorageDealValidating), + fsm.Event(storagemarket.ProviderEventNodeErrored).FromAny().To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("error calling node: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventDealRejected). + FromMany(storagemarket.StorageDealValidating, storagemarket.StorageDealVerifyData, storagemarket.StorageDealAcceptWait).To(storagemarket.StorageDealRejecting). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("deal rejected: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventRejectionSent). + From(storagemarket.StorageDealRejecting).To(storagemarket.StorageDealFailing), + fsm.Event(storagemarket.ProviderEventDealDeciding). + From(storagemarket.StorageDealValidating).To(storagemarket.StorageDealAcceptWait), + fsm.Event(storagemarket.ProviderEventDataRequested). + From(storagemarket.StorageDealAcceptWait).To(storagemarket.StorageDealWaitingForData), + + fsm.Event(storagemarket.ProviderEventDataTransferFailed). + FromMany(storagemarket.StorageDealTransferring, storagemarket.StorageDealProviderTransferAwaitRestart). + To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("error transferring data: %w", err).Error() + return nil + }), + + fsm.Event(storagemarket.ProviderEventDataTransferInitiated). + FromMany(storagemarket.StorageDealWaitingForData, storagemarket.StorageDealProviderTransferAwaitRestart). + To(storagemarket.StorageDealTransferring). + Action(func(deal *storagemarket.MinerDeal, channelId datatransfer.ChannelID) error { + deal.TransferChannelId = &channelId + return nil + }), + + fsm.Event(storagemarket.ProviderEventDataTransferRestarted). 
+ FromMany(storagemarket.StorageDealWaitingForData, storagemarket.StorageDealProviderTransferAwaitRestart). + To(storagemarket.StorageDealTransferring). + From(storagemarket.StorageDealTransferring).ToJustRecord(). + Action(func(deal *storagemarket.MinerDeal, channelId datatransfer.ChannelID) error { + deal.TransferChannelId = &channelId + deal.Message = "" + return nil + }), + + fsm.Event(storagemarket.ProviderEventDataTransferStalled). + FromMany(storagemarket.StorageDealTransferring, storagemarket.StorageDealProviderTransferAwaitRestart). + ToJustRecord(). + Action(func(deal *storagemarket.MinerDeal) error { + deal.Message = "data transfer appears to be stalled, awaiting reconnect from client" + return nil + }), + + fsm.Event(storagemarket.ProviderEventDataTransferCancelled). + FromMany( + storagemarket.StorageDealWaitingForData, + storagemarket.StorageDealTransferring, + storagemarket.StorageDealProviderTransferAwaitRestart, + ). + To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal) error { + deal.Message = "data transfer cancelled" + return nil + }), + + fsm.Event(storagemarket.ProviderEventDataTransferCompleted). + FromMany(storagemarket.StorageDealTransferring, storagemarket.StorageDealProviderTransferAwaitRestart). + To(storagemarket.StorageDealVerifyData), + + fsm.Event(storagemarket.ProviderEventDataVerificationFailed). + From(storagemarket.StorageDealVerifyData).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error, path filestore.Path, metadataPath filestore.Path) error { + deal.PiecePath = path + deal.MetadataPath = metadataPath + deal.Message = xerrors.Errorf("deal data verification failed: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventVerifiedData). + FromMany(storagemarket.StorageDealVerifyData, storagemarket.StorageDealWaitingForData).To(storagemarket.StorageDealReserveProviderFunds). 
+ Action(func(deal *storagemarket.MinerDeal, path filestore.Path, metadataPath filestore.Path) error { + deal.PiecePath = path + deal.MetadataPath = metadataPath + return nil + }), + // add by lin + fsm.Event(storagemarket.ProviderEventVerifiedDataOfSxx). + From(storagemarket.StorageDealWaitingForData).To(storagemarket.StorageDealReserveProviderFundsOfSxx). + Action(func(deal *storagemarket.MinerDeal, path filestore.Path, metadataPath filestore.Path, worker string) error { + deal.PiecePath = path + deal.MetadataPath = metadataPath + deal.Worker = worker + return nil + }), + fsm.Event(storagemarket.ProviderEventFundedOfSxx). + From(storagemarket.StorageDealReserveProviderFundsOfSxx).To(storagemarket.StorageDealPublishOfSxx). + Action(func(deal *storagemarket.MinerDeal, worker string) error { + deal.Worker = worker + return nil + }), + fsm.Event(storagemarket.ProviderEventDealPublishInitiatedOfSxx). + From(storagemarket.StorageDealPublishOfSxx).To(storagemarket.StorageDealPublishingOfSxx). + Action(func(deal *storagemarket.MinerDeal, finalCid cid.Cid, worker string) error { + deal.PublishCid = &finalCid + deal.Worker = worker + return nil + }), + fsm.Event(storagemarket.ProviderEventDealPublishedOfSxx). + From(storagemarket.StorageDealPublishingOfSxx).To(storagemarket.StorageDealStagedOfSxx). + Action(func(deal *storagemarket.MinerDeal, dealID abi.DealID, finalCid cid.Cid, worker string) error { + deal.DealID = dealID + deal.PublishCid = &finalCid + deal.Worker = worker + return nil + }), + // end + fsm.Event(storagemarket.ProviderEventFundingInitiated). + FromMany(storagemarket.StorageDealReserveProviderFunds, storagemarket.StorageDealReserveProviderFundsOfSxx).To(storagemarket.StorageDealProviderFunding). + Action(func(deal *storagemarket.MinerDeal, mcid cid.Cid) error { + deal.AddFundsCid = &mcid + return nil + }), + fsm.Event(storagemarket.ProviderEventFunded). 
+ FromMany(storagemarket.StorageDealProviderFunding, storagemarket.StorageDealReserveProviderFunds).To(storagemarket.StorageDealPublish), + fsm.Event(storagemarket.ProviderEventDealPublishInitiated). + From(storagemarket.StorageDealPublish).To(storagemarket.StorageDealPublishing). + Action(func(deal *storagemarket.MinerDeal, finalCid cid.Cid) error { + deal.PublishCid = &finalCid + return nil + }), + fsm.Event(storagemarket.ProviderEventDealPublishError). + FromMany(storagemarket.StorageDealPublishing, storagemarket.StorageDealPublishingOfSxx).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("PublishStorageDeal error: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventSendResponseFailed). + FromMany(storagemarket.StorageDealAcceptWait, storagemarket.StorageDealRejecting).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("sending response to deal: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventDealPublished). + FromMany(storagemarket.StorageDealPublishing, storagemarket.StorageDealPublishingOfSxx).To(storagemarket.StorageDealStaged). + Action(func(deal *storagemarket.MinerDeal, dealID abi.DealID, finalCid cid.Cid) error { + deal.DealID = dealID + deal.PublishCid = &finalCid + return nil + }), + // change by lin + fsm.Event(storagemarket.ProviderEventFileStoreErrored). + FromMany(storagemarket.StorageDealStaged, storagemarket.StorageDealStagedOfSxx, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("accessing file store: %w", err).Error() + return nil + }), + + fsm.Event(storagemarket.ProviderEventMultistoreErrored). 
+ FromMany(storagemarket.StorageDealStaged, storagemarket.StorageDealStagedOfSxx).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("operating on multistore: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventDealHandoffFailed).FromMany(storagemarket.StorageDealStaged, storagemarket.StorageDealStagedOfSxx).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("handing off deal to node: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventPieceStoreErrored). + FromMany(storagemarket.StorageDealStaged, storagemarket.StorageDealStagedOfSxx).ToJustRecord(). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("recording piece for retrieval: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventDealHandedOff). + FromMany(storagemarket.StorageDealStaged, storagemarket.StorageDealStagedOfSxx).To(storagemarket.StorageDealAwaitingPreCommit). + Action(func(deal *storagemarket.MinerDeal) error { + deal.AvailableForRetrieval = true + return nil + }), + // end + fsm.Event(storagemarket.ProviderEventDealPrecommitFailed). + From(storagemarket.StorageDealAwaitingPreCommit).To(storagemarket.StorageDealFailing). + Action(func(deal *storagemarket.MinerDeal, err error) error { + deal.Message = xerrors.Errorf("error awaiting deal pre-commit: %w", err).Error() + return nil + }), + fsm.Event(storagemarket.ProviderEventDealPrecommitted). + From(storagemarket.StorageDealAwaitingPreCommit).To(storagemarket.StorageDealSealing). + Action(func(deal *storagemarket.MinerDeal, sectorNumber abi.SectorNumber) error { + deal.SectorNumber = sectorNumber + return nil + }), + fsm.Event(storagemarket.ProviderEventDealActivationFailed). + From(storagemarket.StorageDealSealing).To(storagemarket.StorageDealFailing). 
	Action(func(deal *storagemarket.MinerDeal, err error) error {
			// Record why activation failed; the deal then transitions to StorageDealFailing.
			deal.Message = xerrors.Errorf("error activating deal: %w", err).Error()
			return nil
		}),
	fsm.Event(storagemarket.ProviderEventDealActivated).
		FromMany(storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing).
		To(storagemarket.StorageDealFinalizing),
	fsm.Event(storagemarket.ProviderEventFinalized).
		From(storagemarket.StorageDealFinalizing).To(storagemarket.StorageDealActive).
		Action(func(deal *storagemarket.MinerDeal) error {
			// No state to mutate on finalization; the transition itself is the effect.
			return nil
		}),
	fsm.Event(storagemarket.ProviderEventDealSlashed).
		From(storagemarket.StorageDealActive).To(storagemarket.StorageDealSlashed).
		Action(func(deal *storagemarket.MinerDeal, slashEpoch abi.ChainEpoch) error {
			// Remember the epoch at which the deal was slashed on chain.
			deal.SlashEpoch = slashEpoch
			return nil
		}),
	fsm.Event(storagemarket.ProviderEventDealExpired).
		From(storagemarket.StorageDealActive).To(storagemarket.StorageDealExpired),
	fsm.Event(storagemarket.ProviderEventDealCompletionFailed).
		From(storagemarket.StorageDealActive).To(storagemarket.StorageDealError).
		Action(func(deal *storagemarket.MinerDeal, err error) error {
			deal.Message = xerrors.Errorf("error waiting for deal completion: %w", err).Error()
			return nil
		}),

	fsm.Event(storagemarket.ProviderEventFailed).From(storagemarket.StorageDealFailing).To(storagemarket.StorageDealError),

	// Restart routing: deals interrupted mid-negotiation are terminal, a deal
	// interrupted mid-transfer waits for the client to reconnect, and every
	// other state resumes where it left off.
	fsm.Event(storagemarket.ProviderEventRestart).
		FromMany(storagemarket.StorageDealValidating, storagemarket.StorageDealAcceptWait, storagemarket.StorageDealRejecting).
		To(storagemarket.StorageDealError).
		From(storagemarket.StorageDealTransferring).
		To(storagemarket.StorageDealProviderTransferAwaitRestart).
		FromAny().ToNoChange(),

	fsm.Event(storagemarket.ProviderEventAwaitTransferRestartTimeout).
		From(storagemarket.StorageDealProviderTransferAwaitRestart).To(storagemarket.StorageDealFailing).
		FromAny().ToJustRecord().
		Action(func(deal *storagemarket.MinerDeal) error {
			// Only fail with a message when we were actually waiting for a restart;
			// in any other state the timeout is recorded but otherwise ignored.
			if deal.State == storagemarket.StorageDealProviderTransferAwaitRestart {
				// NOTE(review): fmt.Sprintf with no verbs — a plain string assignment
				// would do (staticcheck S1039); left as-is since fmt has no other use here.
				deal.Message = fmt.Sprintf("timed out waiting for client to restart transfer")
			}
			return nil
		}),
	fsm.Event(storagemarket.ProviderEventTrackFundsFailed).
		FromMany(storagemarket.StorageDealReserveProviderFunds, storagemarket.StorageDealReserveProviderFundsOfSxx).To(storagemarket.StorageDealFailing).
		Action(func(deal *storagemarket.MinerDeal, err error) error {
			deal.Message = xerrors.Errorf("error tracking deal funds: %w", err).Error()
			return nil
		}),
	fsm.Event(storagemarket.ProviderEventFundsReserved).
		FromMany(storagemarket.StorageDealReserveProviderFunds, storagemarket.StorageDealReserveProviderFundsOfSxx).ToJustRecord().
		Action(func(deal *storagemarket.MinerDeal, fundsReserved abi.TokenAmount) error {
			// Accumulate reservations; FundsReserved starts Nil before the first reserve.
			if deal.FundsReserved.Nil() {
				deal.FundsReserved = fundsReserved
			} else {
				deal.FundsReserved = big.Add(deal.FundsReserved, fundsReserved)
			}
			return nil
		}),
	fsm.Event(storagemarket.ProviderEventFundsReleased).
		FromMany(storagemarket.StorageDealPublishing, storagemarket.StorageDealFailing).ToJustRecord().
	Action(func(deal *storagemarket.MinerDeal, fundsReleased abi.TokenAmount) error {
			// Deduct released publishing collateral from the running reservation total.
			deal.FundsReserved = big.Subtract(deal.FundsReserved, fundsReleased)
			return nil
		}),
}

// ProviderStateEntryFuncs are the handlers run on entry to each non-terminal
// state of the storage provider deal FSM. (The *OfSxx entries are the forked
// publish/hand-off path that threads a worker identifier through the deal.)
var ProviderStateEntryFuncs = fsm.StateEntryFuncs{
	storagemarket.StorageDealValidating:                   ValidateDealProposal,
	storagemarket.StorageDealAcceptWait:                   DecideOnProposal,
	storagemarket.StorageDealProviderTransferAwaitRestart: WaitForTransferRestart,
	storagemarket.StorageDealVerifyData:                   VerifyData,
	storagemarket.StorageDealReserveProviderFunds:         ReserveProviderFunds,
	storagemarket.StorageDealReserveProviderFundsOfSxx:    ReserveProviderFundsOfSxx,
	storagemarket.StorageDealProviderFunding:              WaitForFunding,
	storagemarket.StorageDealPublish:                      PublishDeal,
	storagemarket.StorageDealPublishOfSxx:                 PublishDealOfSxx,
	storagemarket.StorageDealPublishing:                   WaitForPublish,
	storagemarket.StorageDealPublishingOfSxx:              WaitForPublishOfSxx,
	storagemarket.StorageDealStaged:                       HandoffDeal,
	storagemarket.StorageDealStagedOfSxx:                  HandoffDealOfSxx,
	storagemarket.StorageDealAwaitingPreCommit:            VerifyDealPreCommitted,
	storagemarket.StorageDealSealing:                      VerifyDealActivated,
	storagemarket.StorageDealRejecting:                    RejectDeal,
	storagemarket.StorageDealFinalizing:                   CleanupDeal,
	storagemarket.StorageDealActive:                       WaitForDealCompletion,
	storagemarket.StorageDealFailing:                      FailDeal,
}

// ProviderFinalityStates are the states that terminate deal processing for a deal.
// When a provider restarts, it restarts only deals that are not in a finality state.
+var ProviderFinalityStates = []fsm.StateKey{ + storagemarket.StorageDealError, + storagemarket.StorageDealSlashed, + storagemarket.StorageDealExpired, +} + +// StatesKnownBySealingSubsystem are the states on the happy path after hand-off to +// the sealing subsystem +var StatesKnownBySealingSubsystem = []fsm.StateKey{ + storagemarket.StorageDealStaged, + storagemarket.StorageDealStagedOfSxx, + storagemarket.StorageDealAwaitingPreCommit, + storagemarket.StorageDealSealing, + storagemarket.StorageDealFinalizing, + storagemarket.StorageDealActive, +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_states.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_states.go new file mode 100644 index 00000000000..11e2b701613 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_states.go @@ -0,0 +1,802 @@ +package providerstates + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + carv2 "github.com/ipld/go-car/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-statemachine/fsm" + + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" + + "os" +) + +var log = logging.Logger("providerstates") + +// TODO: These 
are copied from spec-actors master, use spec-actors exports when we update +const DealMaxLabelSize = 256 + +// ProviderDealEnvironment are the dependencies needed for processing deals +// with a ProviderStateEntryFunc +type ProviderDealEnvironment interface { + ReadCAR(path string) (*carv2.Reader, error) + + RegisterShard(ctx context.Context, pieceCid cid.Cid, path string, eagerInit bool) error + AnnounceIndex(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) + RemoveIndex(ctx context.Context, proposalCid cid.Cid) error + + FinalizeBlockstore(proposalCid cid.Cid) error + TerminateBlockstore(proposalCid cid.Cid, path string) error + + GeneratePieceCommitment(proposalCid cid.Cid, path string, dealSize abi.PaddedPieceSize) (cid.Cid, filestore.Path, error) + + Address() address.Address + Node() storagemarket.StorageProviderNode + Ask() storagemarket.StorageAsk + SendSignedResponse(ctx context.Context, response *network.Response) error + Disconnect(proposalCid cid.Cid) error + FileStore() filestore.FileStore + PieceStore() piecestore.PieceStore + RunCustomDecisionLogic(context.Context, storagemarket.MinerDeal) (bool, string, error) + AwaitRestartTimeout() <-chan time.Time + network.PeerTagger +} + +// ProviderStateEntryFunc is the signature for a StateEntryFunc in the provider FSM +type ProviderStateEntryFunc func(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error + +// ValidateDealProposal validates a proposed deal against the provider criteria +func ValidateDealProposal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + environment.TagPeer(deal.Client, deal.ProposalCid.String()) + + tok, curEpoch, err := environment.Node().GetChainHead(ctx.Context()) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error getting most recent state id: %w", err)) + } + + if err := providerutils.VerifyProposal(ctx.Context(), 
deal.ClientDealProposal, tok, environment.Node().VerifySignature); err != nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("verifying StorageDealProposal: %w", err)) + } + + proposal := deal.Proposal + + if proposal.Provider != environment.Address() { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("incorrect provider for deal")) + } + + if proposal.Label.Length() > DealMaxLabelSize { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("deal label can be at most %d bytes, is %d", DealMaxLabelSize, proposal.Label.Length())) + } + + if err := proposal.PieceSize.Validate(); err != nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("proposal piece size is invalid: %w", err)) + } + + if !proposal.PieceCID.Defined() { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("proposal PieceCID undefined")) + } + + if proposal.PieceCID.Prefix() != market.PieceCIDPrefix { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("proposal PieceCID had wrong prefix")) + } + + if proposal.EndEpoch <= proposal.StartEpoch { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("proposal end before proposal start")) + } + + if curEpoch > proposal.StartEpoch { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("deal start epoch has already elapsed")) + } + + // Check that the delta between the start and end epochs (the deal + // duration) is within acceptable bounds + minDuration, maxDuration := market.DealDurationBounds(proposal.PieceSize) + if proposal.Duration() < minDuration || proposal.Duration() > maxDuration { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("deal duration out of bounds (min, max, provided): %d, %d, %d", minDuration, maxDuration, proposal.Duration())) + } + + // Check that the proposed end epoch isn't too far beyond the 
current epoch + maxEndEpoch := curEpoch + minertypes.MaxSectorExpirationExtension + if proposal.EndEpoch > maxEndEpoch { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("invalid deal end epoch %d: cannot be more than %d past current epoch %d", proposal.EndEpoch, minertypes.MaxSectorExpirationExtension, curEpoch)) + } + + pcMin, pcMax, err := environment.Node().DealProviderCollateralBounds(ctx.Context(), proposal.PieceSize, proposal.VerifiedDeal) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error getting collateral bounds: %w", err)) + } + + if proposal.ProviderCollateral.LessThan(pcMin) { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("proposed provider collateral below minimum: %s < %s", proposal.ProviderCollateral, pcMin)) + } + + if proposal.ProviderCollateral.GreaterThan(pcMax) { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("proposed provider collateral above maximum: %s > %s", proposal.ProviderCollateral, pcMax)) + } + + askPrice := environment.Ask().Price + if deal.Proposal.VerifiedDeal { + askPrice = environment.Ask().VerifiedPrice + } + + minPrice := big.Div(big.Mul(askPrice, abi.NewTokenAmount(int64(proposal.PieceSize))), abi.NewTokenAmount(1<<30)) + if proposal.StoragePricePerEpoch.LessThan(minPrice) { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, + xerrors.Errorf("storage price per epoch less than asking price: %s < %s", proposal.StoragePricePerEpoch, minPrice)) + } + + if proposal.PieceSize < environment.Ask().MinPieceSize { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, + xerrors.Errorf("piece size less than minimum required size: %d < %d", proposal.PieceSize, environment.Ask().MinPieceSize)) + } + + if proposal.PieceSize > environment.Ask().MaxPieceSize { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, + xerrors.Errorf("piece size more than maximum allowed size: 
%d > %d", proposal.PieceSize, environment.Ask().MaxPieceSize)) + } + + // check market funds + clientMarketBalance, err := environment.Node().GetBalance(ctx.Context(), proposal.Client, tok) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error getting client market balance failed: %w", err)) + } + + // This doesn't guarantee that the client won't withdraw / lock those funds + // but it's a decent first filter + if clientMarketBalance.Available.LessThan(proposal.ClientBalanceRequirement()) { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("clientMarketBalance.Available too small: %d < %d", clientMarketBalance.Available, proposal.ClientBalanceRequirement())) + } + + // Verified deal checks + if proposal.VerifiedDeal { + dataCap, err := environment.Node().GetDataCap(ctx.Context(), proposal.Client, tok) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error fetching verified data cap: %w", err)) + } + if dataCap == nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error fetching verified data cap: data cap missing -- client not verified")) + } + pieceSize := big.NewIntUnsigned(uint64(proposal.PieceSize)) + if dataCap.LessThan(pieceSize) { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("verified deal DataCap too small for proposed piece size")) + } + } + + return ctx.Trigger(storagemarket.ProviderEventDealDeciding) +} + +// DecideOnProposal allows custom decision logic to run before accepting a deal, such as allowing a manual +// operator to decide whether or not to accept the deal +func DecideOnProposal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + accept, reason, err := environment.RunCustomDecisionLogic(ctx.Context(), deal) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventDealRejected, 
xerrors.Errorf("custom deal decision logic failed: %w", err))
	}

	if !accept {
		// reason is operator-supplied free text, not a format string. The original
		// fmt.Errorf(reason) is a printf misuse (govet): any '%' in reason would
		// corrupt the message. xerrors.New treats it as a literal.
		return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.New(reason))
	}

	// Send intent to accept
	err = environment.SendSignedResponse(ctx.Context(), &network.Response{
		State:    storagemarket.StorageDealWaitingForData,
		Proposal: deal.ProposalCid,
	})

	if err != nil {
		return ctx.Trigger(storagemarket.ProviderEventSendResponseFailed, err)
	}

	// A disconnect failure is non-fatal: the signed response is already out.
	if err := environment.Disconnect(deal.ProposalCid); err != nil {
		log.Warnf("closing client connection: %+v", err)
	}

	return ctx.Trigger(storagemarket.ProviderEventDataRequested)
}

// WaitForTransferRestart fires a timeout after a set amount of time. If the restart hasn't started at this point,
// the transfer fails
func WaitForTransferRestart(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	timeout := environment.AwaitRestartTimeout()
	go func() {
		select {
		case <-ctx.Context().Done():
			// Deal processing is shutting down; nothing to trigger.
		case <-timeout:
			// Trigger errors are deliberately ignored: the FSM may already have
			// moved past the await-restart state, which is the success case.
			_ = ctx.Trigger(storagemarket.ProviderEventAwaitTransferRestartTimeout)
		}
	}()
	return nil
}

// VerifyData verifies that data received for a deal matches the pieceCID
// in the proposal
func VerifyData(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	// finalize the blockstore as we're done writing deal data to it.
+ if err := environment.FinalizeBlockstore(deal.ProposalCid); err != nil { + return ctx.Trigger(storagemarket.ProviderEventDataVerificationFailed, xerrors.Errorf("failed to finalize read/write blockstore: %w", err), filestore.Path(""), filestore.Path("")) + } + + pieceCid, metadataPath, err := environment.GeneratePieceCommitment(deal.ProposalCid, deal.InboundCAR, deal.Proposal.PieceSize) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventDataVerificationFailed, xerrors.Errorf("error generating CommP: %w", err), filestore.Path(""), filestore.Path("")) + } + + // Verify CommP matches + if pieceCid != deal.Proposal.PieceCID { + return ctx.Trigger(storagemarket.ProviderEventDataVerificationFailed, xerrors.Errorf("proposal CommP doesn't match calculated CommP"), filestore.Path(""), metadataPath) + } + + return ctx.Trigger(storagemarket.ProviderEventVerifiedData, filestore.Path(""), metadataPath) +} + +// ReserveProviderFunds adds funds, as needed to the StorageMarketActor, so the miner has adequate collateral for the deal +func ReserveProviderFunds(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + node := environment.Node() + + tok, _, err := node.GetChainHead(ctx.Context()) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("acquiring chain head: %w", err)) + } + + waddr, err := node.GetMinerWorkerAddress(ctx.Context(), deal.Proposal.Provider, tok) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("looking up miner worker: %w", err)) + } + + mcid, err := node.ReserveFunds(ctx.Context(), waddr, deal.Proposal.Provider, deal.Proposal.ProviderCollateral) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("reserving funds: %w", err)) + } + + _ = ctx.Trigger(storagemarket.ProviderEventFundsReserved, deal.Proposal.ProviderCollateral) + + // if no message was sent, and there was no error, 
funds were already available + if mcid == cid.Undef { + return ctx.Trigger(storagemarket.ProviderEventFunded) + } + + return ctx.Trigger(storagemarket.ProviderEventFundingInitiated, mcid) +} + +func ReserveProviderFundsOfSxx(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + log.Errorf("zlin ReserveProviderFunds %+v", deal.Worker) + node := environment.Node() + + tok, _, err := node.GetChainHead(ctx.Context()) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("acquiring chain head: %w", err)) + } + + waddr, err := node.GetMinerWorkerAddress(ctx.Context(), deal.Proposal.Provider, tok) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("looking up miner worker: %w", err)) + } + + mcid, err := node.ReserveFunds(ctx.Context(), waddr, deal.Proposal.Provider, deal.Proposal.ProviderCollateral) + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("reserving funds: %w", err)) + } + + _ = ctx.Trigger(storagemarket.ProviderEventFundsReserved, deal.Proposal.ProviderCollateral) + + // if no message was sent, and there was no error, funds were already available + if mcid == cid.Undef { + return ctx.Trigger(storagemarket.ProviderEventFundedOfSxx, deal.Worker) + } + + return ctx.Trigger(storagemarket.ProviderEventFundingInitiated, mcid) +} + +// WaitForFunding waits for a message posted to add funds to the StorageMarketActor to appear on chain +func WaitForFunding(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + node := environment.Node() + + return node.WaitForMessage(ctx.Context(), *deal.AddFundsCid, func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error { + if err != nil { + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("AddFunds errored: %w", err)) + } + if code != exitcode.Ok { + return 
ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("AddFunds exit code: %s", code.String())) + } + return ctx.Trigger(storagemarket.ProviderEventFunded) + }) +} + +// PublishDeal sends a message to publish a deal on chain +func PublishDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + smDeal := storagemarket.MinerDeal{ + Client: deal.Client, + ClientDealProposal: deal.ClientDealProposal, + ProposalCid: deal.ProposalCid, + State: deal.State, + Ref: deal.Ref, + } + + mcid, err := environment.Node().PublishDeals(ctx.Context(), smDeal) + if err != nil { + if strings.Contains(err.Error(), "not enough funds") { + log.Warnf("publishing deal failed due to lack of funds: %s", err) + + return nil + } + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("publishing deal: %w", err)) + } + + return ctx.Trigger(storagemarket.ProviderEventDealPublishInitiated, mcid) +} + +func PublishDealOfSxx(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + log.Errorf("zlin PublishDeal %+v", deal.Worker) + smDeal := storagemarket.MinerDeal{ + Client: deal.Client, + ClientDealProposal: deal.ClientDealProposal, + ProposalCid: deal.ProposalCid, + State: deal.State, + Ref: deal.Ref, + } + + mcid, err := environment.Node().PublishDeals(ctx.Context(), smDeal) + if err != nil { + if strings.Contains(err.Error(), "not enough funds") { + log.Warnf("publishing deal failed due to lack of funds: %s", err) + + return nil + } + return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("publishing deal: %w", err)) + } + + return ctx.Trigger(storagemarket.ProviderEventDealPublishInitiatedOfSxx, mcid, deal.Worker) +} + +// WaitForPublish waits for the publish message on chain and saves the deal id +// so it can be sent back to the client +func WaitForPublish(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + if 
deal.PublishCid == nil {
		// Fix: this was log.Errorw, whose variadic args are key/value pairs — the
		// %+v verb was never formatted and zap logged a dangling-key error.
		log.Errorf("zlin: WaitForPublish deal %+v don't have PublishCid", deal.ProposalCid)
		return ctx.Trigger(storagemarket.ProviderEventDealPublishError, xerrors.Errorf("PublishStorageDeals errored: deal.PublishCid is nil"))
	}
	res, err := environment.Node().WaitForPublishDeals(ctx.Context(), *deal.PublishCid, deal.Proposal)
	if err != nil {
		return ctx.Trigger(storagemarket.ProviderEventDealPublishError, xerrors.Errorf("PublishStorageDeals errored: %w", err))
	}

	// Once the deal has been published, release funds that were reserved
	// for deal publishing
	releaseReservedFunds(ctx, environment, deal)

	// add by lin
	// Offline-style deals on the SXX path (absolute PiecePath) take the forked
	// published event so the worker-aware hand-off states run.
	if os.Getenv("LOTUS_OF_SXX") == "1" && strings.HasPrefix(string(deal.PiecePath), "/") {
		return ctx.Trigger(storagemarket.ProviderEventDealPublishedOfSxx, res.DealID, res.FinalCid)
	}
	log.Errorw("zlin: unuse SXX publish")
	// end

	return ctx.Trigger(storagemarket.ProviderEventDealPublished, res.DealID, res.FinalCid)
}

// WaitForPublishOfSxx mirrors WaitForPublish for the SXX path: identical
// waiting/release logic, but the success trigger also carries deal.Worker.
func WaitForPublishOfSxx(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	log.Errorf("zlin WaitForPublish %+v", deal.Worker)
	if deal.PublishCid == nil {
		// Same Errorw-with-format-verb misuse fixed here.
		log.Errorf("zlin: WaitForPublish deal %+v don't have PublishCid", deal.ProposalCid)
		return ctx.Trigger(storagemarket.ProviderEventDealPublishError, xerrors.Errorf("PublishStorageDeals errored: deal.PublishCid is nil"))
	}
	res, err := environment.Node().WaitForPublishDeals(ctx.Context(), *deal.PublishCid, deal.Proposal)
	if err != nil {
		return ctx.Trigger(storagemarket.ProviderEventDealPublishError, xerrors.Errorf("PublishStorageDeals errored: %w", err))
	}

	// Once the deal has been published, release funds that were reserved
	// for deal publishing
	releaseReservedFunds(ctx, environment, deal)

	// add by lin
	if os.Getenv("LOTUS_OF_SXX") == "1" && strings.HasPrefix(string(deal.PiecePath), "/") {
		return ctx.Trigger(storagemarket.ProviderEventDealPublishedOfSxx, res.DealID,
res.FinalCid, deal.Worker) + } + // end + + return ctx.Trigger(storagemarket.ProviderEventDealPublished, res.DealID, res.FinalCid) +} + +// add by lin +func HandoffDealOfSxx(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + log.Errorf("zlin: HandoffDealOfSxx %+v", string(deal.Worker)) + carFilePath := string(deal.PiecePath) + if deal.PiecePath == "" { + err := xerrors.Errorf("our path of deal car is nil") + return ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, err) + } + // Data for offline deals is stored on disk, so if PiecePath is set, + // create a Reader from the file path + var reader shared.ReadSeekStarter + reader = &ReadSeekStarter{io.LimitReader(nil, 0)} + + packingInfo, err := environment.Node().OnDealCompleteOfSxx( + ctx.Context(), + storagemarket.MinerDeal{ + Client: deal.Client, + ClientDealProposal: deal.ClientDealProposal, + ProposalCid: deal.ProposalCid, + State: deal.State, + Ref: deal.Ref, + PublishCid: deal.PublishCid, + DealID: deal.DealID, + FastRetrieval: deal.FastRetrieval, + RemoteFilepath: carFilePath, + Worker: deal.Worker, + }, + deal.Proposal.PieceSize.Unpadded(), + reader, + ) + + if err != nil { + err = xerrors.Errorf("packing piece at path %s: %w", deal.PiecePath, err) + return ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, err) + } + + if err := recordPiece(environment, deal, packingInfo.SectorNumber, packingInfo.Offset, packingInfo.Size); err != nil { + err = xerrors.Errorf("failed to register deal data for piece %s for retrieval: %w", deal.Ref.PieceCid, err) + log.Error(err.Error()) + _ = ctx.Trigger(storagemarket.ProviderEventPieceStoreErrored, err) + } + + // Register the deal data as a "shard" with the DAG store. Later it can be + // fetched from the DAG store during retrieval. 
// HandoffDeal hands off a published deal for sealing and commitment in a sector
//
// Two data sources are handled: offline deals whose CAR sits in the file store
// (PiecePath set), and online deals whose CAR was received over data transfer
// (InboundCAR). Either way the data is streamed through handoffDeal into the
// sealing subsystem, then the piece is recorded for retrieval, registered as a
// DAG store shard, and announced to the network indexer. The last three steps
// log failures but do not fail the deal.
func HandoffDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	var packingInfo *storagemarket.PackingResult
	var carFilePath string
	if deal.PiecePath != "" {
		// Data for offline deals is stored on disk, so if PiecePath is set,
		// create a Reader from the file path
		file, err := environment.FileStore().Open(deal.PiecePath)
		if err != nil {
			return ctx.Trigger(storagemarket.ProviderEventFileStoreErrored,
				xerrors.Errorf("reading piece at path %s: %w", deal.PiecePath, err))
		}
		carFilePath = string(file.OsPath())

		// Hand the deal off to the process that adds it to a sector
		log.Infow("handing off deal to sealing subsystem", "pieceCid", deal.Proposal.PieceCID, "proposalCid", deal.ProposalCid)
		packingInfo, err = handoffDeal(ctx.Context(), environment, deal, file, uint64(file.Size()))
		// Close before inspecting the packing error so the file handle is
		// released on both success and failure paths; a close failure is only
		// logged because the data has already been consumed.
		if err := file.Close(); err != nil {
			log.Errorw("failed to close imported CAR file", "pieceCid", deal.Proposal.PieceCID, "proposalCid", deal.ProposalCid, "err", err)
		}

		if err != nil {
			err = xerrors.Errorf("packing piece at path %s: %w", deal.PiecePath, err)
			return ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, err)
		}
	} else {
		carFilePath = deal.InboundCAR

		v2r, err := environment.ReadCAR(deal.InboundCAR)
		if err != nil {
			return ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, xerrors.Errorf("failed to open CARv2 file, proposalCid=%s: %w",
				deal.ProposalCid, err))
		}

		// Hand the deal off to the process that adds it to a sector
		// packingErr is kept separate from err so the CARv2 reader is always
		// closed before the packing result is inspected.
		var packingErr error
		log.Infow("handing off deal to sealing subsystem", "pieceCid", deal.Proposal.PieceCID, "proposalCid", deal.ProposalCid)
		r, err := v2r.DataReader()
		if err != nil {
			return ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, fmt.Errorf("failed to get reader over file data, proposalCid=%s: %w",
				deal.ProposalCid, err))
		}
		packingInfo, packingErr = handoffDeal(ctx.Context(), environment, deal, r, v2r.Header.DataSize)
		// Close the reader as we're done reading from it.
		if err := v2r.Close(); err != nil {
			return ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, xerrors.Errorf("failed to close CARv2 reader: %w", err))
		}
		log.Infow("closed car datareader after handing off deal to sealing subsystem", "pieceCid", deal.Proposal.PieceCID, "proposalCid", deal.ProposalCid)
		if packingErr != nil {
			err = xerrors.Errorf("packing piece %s: %w", deal.Ref.PieceCid, packingErr)
			return ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, err)
		}
	}

	// Piece-store registration failure is non-fatal: the data is sealed, only
	// retrieval indexing is degraded.
	if err := recordPiece(environment, deal, packingInfo.SectorNumber, packingInfo.Offset, packingInfo.Size); err != nil {
		err = xerrors.Errorf("failed to register deal data for piece %s for retrieval: %w", deal.Ref.PieceCid, err)
		log.Error(err.Error())
		_ = ctx.Trigger(storagemarket.ProviderEventPieceStoreErrored, err)
	}

	// Register the deal data as a "shard" with the DAG store. Later it can be
	// fetched from the DAG store during retrieval.
	if err := environment.RegisterShard(ctx.Context(), deal.Proposal.PieceCID, carFilePath, true); err != nil {
		err = xerrors.Errorf("failed to activate shard: %w", err)
		log.Error(err)
	}

	// announce the deal to the network indexer
	annCid, err := environment.AnnounceIndex(ctx.Context(), deal)
	if err != nil {
		log.Errorw("failed to announce index via reference provider", "proposalCid", deal.ProposalCid, "err", err)
	} else {
		log.Infow("deal announcement sent to index provider", "advertisementCid", annCid, "shard-key", deal.Proposal.PieceCID,
			"proposalCid", deal.ProposalCid)
	}

	log.Infow("successfully handed off deal to sealing subsystem", "pieceCid", deal.Proposal.PieceCID, "proposalCid", deal.ProposalCid)
	return ctx.Trigger(storagemarket.ProviderEventDealHandedOff)
}

// handoffDeal pads the payload out to the proposal's unpadded piece size and
// passes it to the node for inclusion in a sector.
func handoffDeal(ctx context.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal, reader io.ReadSeeker, payloadSize uint64) (*storagemarket.PackingResult, error) {
	// because we use the PadReader directly during Add Piece we need to produce the
	// correct amount of zeroes
	// (alternative would be to keep precise track of sector offsets for each
	// piece which is just too much work for a seldom used feature)
	paddedReader, err := shared.NewInflatorReader(reader, payloadSize, deal.Proposal.PieceSize.Unpadded())
	if err != nil {
		return nil, err
	}

	// Only the fields the sealing pipeline needs are copied into the handoff
	// deal record.
	return environment.Node().OnDealComplete(
		ctx,
		storagemarket.MinerDeal{
			Client:             deal.Client,
			ClientDealProposal: deal.ClientDealProposal,
			ProposalCid:        deal.ProposalCid,
			State:              deal.State,
			Ref:                deal.Ref,
			PublishCid:         deal.PublishCid,
			DealID:             deal.DealID,
			FastRetrieval:      deal.FastRetrieval,
		},
		deal.Proposal.PieceSize.Unpadded(),
		paddedReader,
	)
}
deal.MetadataPath != filestore.Path("") { + var err error + blockLocations, err = providerutils.LoadBlockLocations(environment.FileStore(), deal.MetadataPath) + if err != nil { + return xerrors.Errorf("failed to load block locations: %w", err) + } + } else { + blockLocations = map[cid.Cid]piecestore.BlockLocation{ + deal.Ref.Root: {}, + } + } + + if err := environment.PieceStore().AddPieceBlockLocations(deal.Proposal.PieceCID, blockLocations); err != nil { + return xerrors.Errorf("failed to add piece block locations: %s", err) + } + + err := environment.PieceStore().AddDealForPiece(deal.Proposal.PieceCID, piecestore.DealInfo{ + DealID: deal.DealID, + SectorID: sectorID, + Offset: offset, + Length: length, + }) + if err != nil { + return xerrors.Errorf("failed to add deal for piece: %s", err) + } + + return nil +} + +// CleanupDeal clears the filestore once we know the mining component has read the data and it is in a sealed sector +func CleanupDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + if deal.PiecePath != "" { + err := environment.FileStore().Delete(deal.PiecePath) + if err != nil { + log.Warnf("deleting piece at path %s: %w", deal.PiecePath, err) + } + } + if deal.MetadataPath != "" { + err := environment.FileStore().Delete(deal.MetadataPath) + if err != nil { + log.Warnf("deleting piece at path %s: %w", deal.MetadataPath, err) + } + } + + if deal.InboundCAR != "" { + if err := environment.TerminateBlockstore(deal.ProposalCid, deal.InboundCAR); err != nil { + log.Warnf("failed to cleanup blockstore, car_path=%s: %s", deal.InboundCAR, err) + } + } + + return ctx.Trigger(storagemarket.ProviderEventFinalized) +} + +// VerifyDealPreCommitted verifies that a deal has been pre-committed +func VerifyDealPreCommitted(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { + cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) { + // It's possible that + // - we miss 
// VerifyDealPreCommitted verifies that a deal has been pre-committed
//
// The node invokes cb exactly once when the deal's sector is pre-committed,
// already active, or errored; each outcome maps to its own FSM trigger.
func VerifyDealPreCommitted(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) {
		// It's possible that
		// - we miss the pre-commit message and have to wait for prove-commit
		// - the deal is already active (for example if the node is restarted
		//   while waiting for pre-commit)
		// In either of these two cases, isActive will be true.
		switch {
		case err != nil:
			_ = ctx.Trigger(storagemarket.ProviderEventDealPrecommitFailed, err)
		case isActive:
			_ = ctx.Trigger(storagemarket.ProviderEventDealActivated)
		default:
			_ = ctx.Trigger(storagemarket.ProviderEventDealPrecommitted, sectorNumber)
		}
	}

	err := environment.Node().OnDealSectorPreCommitted(ctx.Context(), deal.Proposal.Provider, deal.DealID, deal.Proposal, deal.PublishCid, cb)

	// A synchronous registration error fails the deal immediately; otherwise
	// progress happens through cb.
	if err != nil {
		return ctx.Trigger(storagemarket.ProviderEventDealPrecommitFailed, err)
	}
	return nil
}

// VerifyDealActivated verifies that a deal has been committed to a sector and activated
func VerifyDealActivated(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	// TODO: consider waiting for seal to happen
	cb := func(err error) {
		if err != nil {
			_ = ctx.Trigger(storagemarket.ProviderEventDealActivationFailed, err)
		} else {
			_ = ctx.Trigger(storagemarket.ProviderEventDealActivated)
		}
	}

	err := environment.Node().OnDealSectorCommitted(ctx.Context(), deal.Proposal.Provider, deal.DealID, deal.SectorNumber, deal.Proposal, deal.PublishCid, cb)

	if err != nil {
		return ctx.Trigger(storagemarket.ProviderEventDealActivationFailed, err)
	}
	return nil
}
// WaitForDealCompletion waits for the deal to be slashed or to expire
//
// Whichever callback fires first also removes the deal from the network
// indexer, since the data is no longer retrievable under this deal.
func WaitForDealCompletion(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	// At this point we have all the data so we can unprotect the connection
	environment.UntagPeer(deal.Client, deal.ProposalCid.String())

	node := environment.Node()

	// Called when the deal expires
	expiredCb := func(err error) {
		// Ask the indexer to remove this deal
		environment.RemoveIndex(ctx.Context(), deal.ProposalCid)

		if err != nil {
			_ = ctx.Trigger(storagemarket.ProviderEventDealCompletionFailed, xerrors.Errorf("deal expiration err: %w", err))
		} else {
			_ = ctx.Trigger(storagemarket.ProviderEventDealExpired)
		}
	}

	// Called when the deal is slashed
	slashedCb := func(slashEpoch abi.ChainEpoch, err error) {
		// Ask the indexer to remove this deal
		environment.RemoveIndex(ctx.Context(), deal.ProposalCid)

		if err != nil {
			_ = ctx.Trigger(storagemarket.ProviderEventDealCompletionFailed, xerrors.Errorf("deal slashing err: %w", err))
		} else {
			_ = ctx.Trigger(storagemarket.ProviderEventDealSlashed, slashEpoch)
		}
	}

	if err := node.OnDealExpiredOrSlashed(ctx.Context(), deal.DealID, expiredCb, slashedCb); err != nil {
		return ctx.Trigger(storagemarket.ProviderEventDealCompletionFailed, err)
	}

	return nil
}

// RejectDeal sends a failure response before terminating a deal
func RejectDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error {
	err := environment.SendSignedResponse(ctx.Context(), &network.Response{
		State:    storagemarket.StorageDealFailing,
		Message:  deal.Message,
		Proposal: deal.ProposalCid,
	})

	if err != nil {
		return ctx.Trigger(storagemarket.ProviderEventSendResponseFailed, err)
	}

	// The client connection is no longer needed once the rejection is sent;
	// a disconnect failure is only worth a warning.
	if err := environment.Disconnect(deal.ProposalCid); err != nil {
		log.Warnf("closing client connection: %+v", err)
	}

	return ctx.Trigger(storagemarket.ProviderEventRejectionSent)
}
deal.MetadataPath != filestore.Path("") { + err := environment.FileStore().Delete(deal.MetadataPath) + if err != nil { + log.Warnf("deleting piece at path %s: %w", deal.MetadataPath, err) + } + } + + if deal.InboundCAR != "" { + if err := environment.FinalizeBlockstore(deal.ProposalCid); err != nil { + log.Warnf("error finalizing read-write store, car_path=%s: %s", deal.InboundCAR, err) + } + + if err := environment.TerminateBlockstore(deal.ProposalCid, deal.InboundCAR); err != nil { + log.Warnf("error deleting store, car_path=%s: %s", deal.InboundCAR, err) + } + } + + releaseReservedFunds(ctx, environment, deal) + + return ctx.Trigger(storagemarket.ProviderEventFailed) +} + +func releaseReservedFunds(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) { + if !deal.FundsReserved.Nil() && !deal.FundsReserved.IsZero() { + err := environment.Node().ReleaseFunds(ctx.Context(), deal.Proposal.Provider, deal.FundsReserved) + if err != nil { + // nonfatal error + log.Warnf("failed to release funds: %s", err) + } + _ = ctx.Trigger(storagemarket.ProviderEventFundsReleased, deal.FundsReserved) + } +} + +// add by lin +type ReadSeekStarter struct { + io.Reader +} + +func (r *ReadSeekStarter) SeekStart() error { + return nil +} + +// end \ No newline at end of file diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_states_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_states_test.go new file mode 100644 index 00000000000..70b4f450dc9 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerstates/provider_states_test.go @@ -0,0 +1,1659 @@ +package providerstates_test + +import ( + "bytes" + "context" + "crypto/sha256" + "errors" + "fmt" + "io" + "math/rand" + "strings" + "testing" + "time" + + "github.com/ipfs/go-cid" + carv2 "github.com/ipld/go-car/v2" + "github.com/libp2p/go-libp2p-core/peer" + mh 
// TestValidateDealProposal exercises providerstates.ValidateDealProposal with
// a table of node/deal/environment permutations, asserting the resulting FSM
// state and rejection message for each. Fixtures (makeExecutor, nodeParams,
// dealParams, fakeEnvironment, default* constants) are defined later in this
// file.
func TestValidateDealProposal(t *testing.T) {
	ctx := context.Background()
	eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents)
	require.NoError(t, err)
	runValidateDealProposal := makeExecutor(ctx, eventProcessor, providerstates.ValidateDealProposal, storagemarket.StorageDealValidating)
	otherAddr, err := address.NewActorAddress([]byte("applesauce"))
	require.NoError(t, err)
	bigDataCap := big.NewIntUnsigned(uint64(defaultPieceSize))
	smallDataCap := big.NewIntUnsigned(uint64(defaultPieceSize - 1))

	tests := map[string]struct {
		nodeParams        nodeParams
		dealParams        dealParams
		environmentParams environmentParams
		fileStoreParams   tut.TestFileStoreParams
		pieceStoreParams  tut.TestPieceStoreParams
		dealInspector     func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)
	}{
		// Happy path: deal accepted and the client peer gets tagged.
		"succeeds": {
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealAcceptWait, deal.State)
				require.Len(t, env.peerTagger.TagCalls, 1)
				require.Equal(t, deal.Client, env.peerTagger.TagCalls[0])
			},
		},
		"verify signature fails": {
			nodeParams: nodeParams{
				VerifySignatureFails: true,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: verifying StorageDealProposal: could not verify signature", deal.Message)
			},
		},
		"provider address does not match": {
			environmentParams: environmentParams{
				Address: otherAddr,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: incorrect provider for deal", deal.Message)
			},
		},
		"MostRecentStateID errors": {
			nodeParams: nodeParams{
				MostRecentStateIDError: errors.New("couldn't get id"),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: node error getting most recent state id: couldn't get id", deal.Message)
			},
		},
		"PricePerEpoch too low": {
			dealParams: dealParams{
				StoragePricePerEpoch: abi.NewTokenAmount(5000),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: storage price per epoch less than asking price: 5000 < 9765", deal.Message)
			},
		},
		"PieceSize < MinPieceSize": {
			dealParams: dealParams{
				PieceSize: abi.PaddedPieceSize(128),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: piece size less than minimum required size: 128 < 256", deal.Message)
			},
		},
		"Get balance error": {
			nodeParams: nodeParams{
				ClientMarketBalanceError: errors.New("could not get balance"),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: node error getting client market balance failed: could not get balance", deal.Message)
			},
		},
		// Balance is one attoFIL short of the required price * duration.
		"Not enough funds": {
			nodeParams: nodeParams{
				ClientMarketBalance: big.NewInt(200*10000 - 1),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.True(t, strings.Contains(deal.Message, "deal rejected: clientMarketBalance.Available too small"))
			},
		},
		// Balance covers the price but not the additional client collateral.
		"Not enough funds due to client collateral": {
			nodeParams: nodeParams{
				ClientMarketBalance: big.NewInt(200*10000 + 99),
			},
			dealParams: dealParams{
				ClientCollateral: big.NewInt(100),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.True(t, strings.Contains(deal.Message, "deal rejected: clientMarketBalance.Available too small"))
			},
		},
		"verified deal succeeds": {
			dealParams: dealParams{
				VerifiedDeal: true,
			},
			nodeParams: nodeParams{
				DataCap: &bigDataCap,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				require.True(t, deal.Proposal.VerifiedDeal)
				tut.AssertDealState(t, storagemarket.StorageDealAcceptWait, deal.State)
			},
		},
		"verified deal fails getting client data cap": {
			dealParams: dealParams{
				VerifiedDeal: true,
			},
			nodeParams: nodeParams{
				GetDataCapError: xerrors.Errorf("failure getting data cap"),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				require.True(t, deal.Proposal.VerifiedDeal)
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: node error fetching verified data cap: failure getting data cap", deal.Message)
			},
		},
		// No DataCap configured at all: the client is not a verified client.
		"verified deal fails data cap not found": {
			dealParams: dealParams{
				VerifiedDeal: true,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				require.True(t, deal.Proposal.VerifiedDeal)
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: node error fetching verified data cap: data cap missing -- client not verified", deal.Message)
			},
		},
		"verified deal fails with insufficient data cap": {
			dealParams: dealParams{
				VerifiedDeal: true,
			},
			nodeParams: nodeParams{
				DataCap: &smallDataCap,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				require.True(t, deal.Proposal.VerifiedDeal)
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: verified deal DataCap too small for proposed piece size", deal.Message)
			},
		},
		// 129 is not a power of two, so the padded piece size is invalid.
		"invalid piece size": {
			dealParams: dealParams{
				PieceSize: 129,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: proposal piece size is invalid: padded piece size must be a power of 2", deal.Message)
			},
		},
		// A generic CID lacks the commP multicodec/multihash prefix.
		"invalid piece cid prefix": {
			dealParams: dealParams{
				PieceCid: &tut.GenerateCids(1)[0],
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: proposal PieceCID had wrong prefix", deal.Message)
			},
		},
		"end epoch before start": {
			dealParams: dealParams{
				StartEpoch: 1000,
				EndEpoch:   900,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: proposal end before proposal start", deal.Message)
			},
		},
		"start epoch has already passed": {
			dealParams: dealParams{
				StartEpoch: defaultHeight - 1,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: deal start epoch has already elapsed", deal.Message)
			},
		},
		"deal duration too short (less than 180 days)": {
			dealParams: dealParams{
				StartEpoch: defaultHeight,
				EndEpoch:   defaultHeight + builtin.EpochsInDay*180 - 1,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.True(t, strings.Contains(deal.Message, "deal rejected: deal duration out of bounds"))
			},
		},
		// NOTE(review): the balance multiplier uses EpochsInDay*54+1 while the
		// duration uses 540 days — possibly a typo for 540, but harmless since
		// rejection happens on duration; confirm before changing.
		"deal duration too long (more than 540 days)": {
			nodeParams: nodeParams{
				ClientMarketBalance: big.Mul(abi.NewTokenAmount(builtin.EpochsInDay*54+1), defaultStoragePricePerEpoch),
			},
			dealParams: dealParams{
				StartEpoch: defaultHeight,
				EndEpoch:   defaultHeight + builtin.EpochsInDay*540 + 1,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.True(t, strings.Contains(deal.Message, "deal rejected: deal duration out of bounds"))
			},
		},
		"end epoch too long after current epoch": {
			nodeParams: nodeParams{
				Height: defaultHeight - 10,
			},
			dealParams: dealParams{
				StartEpoch: defaultHeight,
				EndEpoch:   defaultHeight + builtin.EpochsInDay*540,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.True(t, strings.Contains(deal.Message, "invalid deal end epoch"))
			},
		},
	}
	for test, data := range tests {
		t.Run(test, func(t *testing.T) {
			runValidateDealProposal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector)
		})
	}
}
// TestDecideOnProposal exercises providerstates.DecideOnProposal: the default
// accept path, custom decision logic rejecting or erroring, and failure to
// send the signed response back to the client.
func TestDecideOnProposal(t *testing.T) {
	ctx := context.Background()
	eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents)
	require.NoError(t, err)
	runDecideOndeal := makeExecutor(ctx, eventProcessor, providerstates.DecideOnProposal, storagemarket.StorageDealAcceptWait)
	tests := map[string]struct {
		nodeParams        nodeParams
		dealParams        dealParams
		environmentParams environmentParams
		fileStoreParams   tut.TestFileStoreParams
		pieceStoreParams  tut.TestPieceStoreParams
		dealInspector     func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)
	}{
		"succeeds": {
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealWaitingForData, deal.State)
			},
		},
		"Custom Decision Rejects Deal": {
			environmentParams: environmentParams{
				RejectDeal:   true,
				RejectReason: "I just don't like it",
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: I just don't like it", deal.Message)
			},
		},
		"Custom Decision Errors": {
			environmentParams: environmentParams{
				DecisionError: errors.New("I can't make up my mind"),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State)
				require.Equal(t, "deal rejected: custom deal decision logic failed: I can't make up my mind", deal.Message)
			},
		},
		"SendSignedResponse errors": {
			environmentParams: environmentParams{
				SendSignedResponseError: errors.New("could not send"),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State)
				require.Equal(t, "sending response to deal: could not send", deal.Message)
			},
		},
	}
	for test, data := range tests {
		t.Run(test, func(t *testing.T) {
			runDecideOndeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector)
		})
	}
}
DecisionError: errors.New("I can't make up my mind"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) + require.Equal(t, "deal rejected: custom deal decision logic failed: I can't make up my mind", deal.Message) + }, + }, + "SendSignedResponse errors": { + environmentParams: environmentParams{ + SendSignedResponseError: errors.New("could not send"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "sending response to deal: could not send", deal.Message) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runDecideOndeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestWaitForTransferRestart(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + awaitRestartTimeout := make(chan time.Time) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + state storagemarket.StorageDealStatus + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "no timeout fired": { + environmentParams: environmentParams{}, + state: storagemarket.StorageDealProviderTransferAwaitRestart, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealProviderTransferAwaitRestart, deal.State) + }, + }, + + "fires after state change": { + environmentParams: environmentParams{ + AwaitRestartTimeout: awaitRestartTimeout, + 
}, + state: storagemarket.StorageDealTransferring, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealTransferring, deal.State) + }, + }, + + "firsts without state change": { + environmentParams: environmentParams{ + AwaitRestartTimeout: awaitRestartTimeout, + }, + state: storagemarket.StorageDealProviderTransferAwaitRestart, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "timed out waiting for client to restart transfer", deal.Message) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runWaitForTransferRestart := makeExecutor(ctx, eventProcessor, providerstates.WaitForTransferRestart, data.state) + runWaitForTransferRestart(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} +func TestVerifyData(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + expMetaPath := filestore.Path("somemetadata.txt") + runVerifyData := makeExecutor(ctx, eventProcessor, providerstates.VerifyData, storagemarket.StorageDealVerifyData) + + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + environmentParams: environmentParams{ + MetadataPath: expMetaPath, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealReserveProviderFunds, deal.State) + require.Equal(t, 
// TestVerifyData exercises providerstates.VerifyData: successful CommP
// verification, blockstore finalization failure, CommP generation failure,
// and a CommP mismatch against the proposal.
func TestVerifyData(t *testing.T) {
	ctx := context.Background()
	eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents)
	require.NoError(t, err)
	expMetaPath := filestore.Path("somemetadata.txt")
	runVerifyData := makeExecutor(ctx, eventProcessor, providerstates.VerifyData, storagemarket.StorageDealVerifyData)

	tests := map[string]struct {
		nodeParams        nodeParams
		dealParams        dealParams
		environmentParams environmentParams
		fileStoreParams   tut.TestFileStoreParams
		pieceStoreParams  tut.TestPieceStoreParams
		dealInspector     func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)
	}{
		"succeeds": {
			environmentParams: environmentParams{
				MetadataPath: expMetaPath,
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealReserveProviderFunds, deal.State)
				require.Equal(t, filestore.Path(""), deal.PiecePath)
				require.Equal(t, expMetaPath, deal.MetadataPath)
			},
		},

		"finalize blockstore fails": {
			environmentParams: environmentParams{
				FinalizeBlockstoreError: errors.New("finalize error"),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State)
				// NOTE(review): message assertion deliberately disabled in the
				// original — confirm intent before re-enabling.
				//require.Contains(t, deal.Message, "finalize error")
			},
		},

		"generate piece CID fails": {
			environmentParams: environmentParams{
				GenerateCommPError: errors.New("could not generate CommP"),
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State)
				require.Equal(t, "deal data verification failed: error generating CommP: could not generate CommP", deal.Message)
			},
		},
		"piece CIDs do not match": {
			environmentParams: environmentParams{
				MetadataPath: expMetaPath,
				PieceCid:     tut.GenerateCids(1)[0],
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State)
				require.Equal(t, "deal data verification failed: proposal CommP doesn't match calculated CommP", deal.Message)
				require.Equal(t, filestore.Path(""), deal.PiecePath)
				require.Equal(t, expMetaPath, deal.MetadataPath)
			},
		},
	}
	for test, data := range tests {
		t.Run(test, func(t *testing.T) {
			runVerifyData(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector)
		})
	}
}
// TestWaitForFunding exercises providerstates.WaitForFunding: the AddFunds
// message landing with exit code Ok advances to publish; any non-ok exit code
// fails the deal.
func TestWaitForFunding(t *testing.T) {
	ctx := context.Background()
	eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents)
	require.NoError(t, err)
	runWaitForFunding := makeExecutor(ctx, eventProcessor, providerstates.WaitForFunding, storagemarket.StorageDealProviderFunding)
	tests := map[string]struct {
		nodeParams        nodeParams
		dealParams        dealParams
		environmentParams environmentParams
		fileStoreParams   tut.TestFileStoreParams
		pieceStoreParams  tut.TestPieceStoreParams
		dealInspector     func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)
	}{
		"succeeds": {
			nodeParams: nodeParams{
				WaitForMessageExitCode: exitcode.Ok,
				WaitForMessageRetBytes: []byte{},
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealPublish, deal.State)
			},
		},
		"AddFunds returns non-ok exit code": {
			nodeParams: nodeParams{
				WaitForMessageExitCode: exitcode.ErrInsufficientFunds,
				WaitForMessageRetBytes: []byte{},
			},
			dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) {
				tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State)
				require.Equal(t, fmt.Sprintf("error calling node: AddFunds exit code: %s", exitcode.ErrInsufficientFunds), deal.Message)
			},
		},
	}
	for test, data := range tests {
		t.Run(test, func(t *testing.T) {
			runWaitForFunding(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector)
		})
	}
}
dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealPublish, deal.State) + require.Equal(t, env.node.DealFunds.ReserveCalls[0], deal.Proposal.ProviderBalanceRequirement()) + require.Len(t, env.node.DealFunds.ReleaseCalls, 0) + require.Equal(t, deal.Proposal.ProviderBalanceRequirement(), deal.FundsReserved) + }, + }, + "succeeds by sending an AddBalance message": { + dealParams: dealParams{ + ProviderCollateral: abi.NewTokenAmount(1), + }, + nodeParams: nodeParams{ + AddFundsCid: cids[0], + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealProviderFunding, deal.State) + require.Equal(t, &cids[0], deal.AddFundsCid) + require.Equal(t, env.node.DealFunds.ReserveCalls[0], deal.Proposal.ProviderBalanceRequirement()) + require.Len(t, env.node.DealFunds.ReleaseCalls, 0) + require.Equal(t, deal.Proposal.ProviderBalanceRequirement(), deal.FundsReserved) + }, + }, + "get miner worker fails": { + nodeParams: nodeParams{ + MinerWorkerError: errors.New("could not get worker"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Len(t, env.node.DealFunds.ReserveCalls, 0) + require.Len(t, env.node.DealFunds.ReleaseCalls, 0) + require.True(t, deal.FundsReserved.Nil()) + require.Equal(t, "error calling node: looking up miner worker: could not get worker", deal.Message) + }, + }, + "reserveFunds errors": { + nodeParams: nodeParams{ + ReserveFundsError: errors.New("not enough funds"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "error calling node: reserving funds: not enough funds", deal.Message) + require.Len(t, env.node.DealFunds.ReserveCalls, 0) 
+ require.Len(t, env.node.DealFunds.ReleaseCalls, 0) + require.True(t, deal.FundsReserved.Nil()) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runReserveProviderFunds(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestPublishDeal(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runPublishDeal := makeExecutor(ctx, eventProcessor, providerstates.PublishDeal, storagemarket.StorageDealPublish) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealPublishing, deal.State) + }, + }, + "PublishDealsErrors returns not enough funds error": { + nodeParams: nodeParams{ + PublishDealsError: errors.New("not enough funds"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealPublish, deal.State) + require.Equal(t, "", deal.Message) + }, + }, + "PublishDealsErrors errors": { + nodeParams: nodeParams{ + PublishDealsError: errors.New("could not post to chain"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "error calling node: publishing deal: could not post to chain", deal.Message) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runPublishDeal(t, 
data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestWaitForPublish(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runWaitForPublish := makeExecutor(ctx, eventProcessor, providerstates.WaitForPublish, storagemarket.StorageDealPublishing) + expDealID := abi.DealID(10) + finalCid := tut.GenerateCids(10)[9] + + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + dealParams: dealParams{ + ReserveFunds: true, + }, + nodeParams: nodeParams{ + PublishDealID: expDealID, + WaitForMessagePublishCid: finalCid, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealStaged, deal.State) + require.Equal(t, expDealID, deal.DealID) + assert.Equal(t, env.node.DealFunds.ReleaseCalls[0], deal.Proposal.ProviderBalanceRequirement()) + assert.True(t, deal.FundsReserved.Nil() || deal.FundsReserved.IsZero()) + assert.Equal(t, deal.PublishCid, &finalCid) + }, + }, + "succeeds, funds already released": { + nodeParams: nodeParams{ + PublishDealID: expDealID, + WaitForMessagePublishCid: finalCid, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealStaged, deal.State) + require.Equal(t, expDealID, deal.DealID) + assert.Len(t, env.node.DealFunds.ReleaseCalls, 0) + }, + }, + "PublishStorageDeal errors": { + nodeParams: nodeParams{ + WaitForPublishDealsError: errors.New("wait publish err"), + }, + dealInspector: func(t *testing.T, 
deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "PublishStorageDeal error: PublishStorageDeals errored: wait publish err", deal.Message) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runWaitForPublish(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestHandoffDeal(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runHandoffDeal := makeExecutor(ctx, eventProcessor, providerstates.HandoffDeal, storagemarket.StorageDealStaged) + carv2Reader := &carv2.Reader{} + + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds for offline deal": { + dealParams: dealParams{ + PiecePath: defaultPath, + FastRetrieval: true, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile}, + ExpectedOpens: []filestore.Path{defaultPath}, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + require.Len(t, env.node.OnDealCompleteCalls, 1) + require.True(t, env.node.OnDealCompleteCalls[0].FastRetrieval) + require.True(t, deal.AvailableForRetrieval) + }, + }, + + "succeed, assemble piece on demand": { + dealParams: dealParams{ + FastRetrieval: true, + }, + environmentParams: environmentParams{ + Carv2Reader: carv2Reader, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + 
tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + require.Len(t, env.node.OnDealCompleteCalls, 1) + require.True(t, env.node.OnDealCompleteCalls[0].FastRetrieval) + require.True(t, deal.AvailableForRetrieval) + }, + }, + + "fails when can't get a CARv2 reader": { + dealParams: dealParams{ + FastRetrieval: true, + }, + environmentParams: environmentParams{ + Carv2Reader: carv2Reader, + Carv2Error: errors.New("reader error"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Len(t, env.node.OnDealCompleteCalls, 0) + require.Empty(t, env.node.OnDealCompleteCalls) + require.Contains(t, deal.Message, "reader error") + }, + }, + + "succeeds w metadata": { + dealParams: dealParams{ + PiecePath: defaultPath, + MetadataPath: defaultMetadataPath, + FastRetrieval: true, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile, defaultMetadataFile}, + ExpectedOpens: []filestore.Path{defaultPath, defaultMetadataPath}, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + require.Len(t, env.node.OnDealCompleteCalls, 1) + require.True(t, env.node.OnDealCompleteCalls[0].FastRetrieval) + require.True(t, deal.AvailableForRetrieval) + }, + }, + + "reading metadata fails": { + dealParams: dealParams{ + PiecePath: defaultPath, + MetadataPath: filestore.Path("Missing.txt"), + FastRetrieval: true, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile}, + ExpectedOpens: []filestore.Path{defaultPath}, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + require.Equal(t, fmt.Sprintf("recording piece for retrieval: failed 
to register deal data for piece %s for retrieval: failed to load block locations: file not found", deal.Ref.PieceCid), deal.Message) + }, + }, + + "add piece block locations errors": { + dealParams: dealParams{ + PiecePath: defaultPath, + FastRetrieval: true, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile}, + ExpectedOpens: []filestore.Path{defaultPath}, + }, + pieceStoreParams: tut.TestPieceStoreParams{ + AddPieceBlockLocationsError: errors.New("could not add block locations"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + require.Equal(t, fmt.Sprintf("recording piece for retrieval: failed to register deal data for piece %s for retrieval: failed to add piece block locations: could not add block locations", deal.Ref.PieceCid), deal.Message) + }, + }, + + "add deal for piece errors": { + dealParams: dealParams{ + PiecePath: defaultPath, + FastRetrieval: true, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile}, + ExpectedOpens: []filestore.Path{defaultPath}, + }, + pieceStoreParams: tut.TestPieceStoreParams{ + AddDealForPieceError: errors.New("could not add deal info"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + require.Equal(t, fmt.Sprintf("recording piece for retrieval: failed to register deal data for piece %s for retrieval: failed to add deal for piece: could not add deal info", deal.Ref.PieceCid), deal.Message) + }, + }, + "opening file errors": { + dealParams: dealParams{ + PiecePath: filestore.Path("missing.txt"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, 
fmt.Sprintf("accessing file store: reading piece at path missing.txt: %s", tut.TestErrNotFound.Error()), deal.Message) + }, + }, + + "OnDealComplete errors": { + dealParams: dealParams{ + PiecePath: defaultPath, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile}, + ExpectedOpens: []filestore.Path{defaultPath}, + }, + nodeParams: nodeParams{ + OnDealCompleteError: errors.New("failed building sector"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "handing off deal to node: packing piece at path file.txt: failed building sector", deal.Message) + }, + }, + + "assemble piece on demand fails because OnComplete fails": { + environmentParams: environmentParams{ + Carv2Reader: carv2Reader, + }, + dealParams: dealParams{ + FastRetrieval: true, + }, + nodeParams: nodeParams{ + OnDealCompleteError: errors.New("failed building sector"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Contains(t, deal.Message, "failed building sector") + }, + }, + + "succeeds even if shard activation fails": { + dealParams: dealParams{ + FastRetrieval: true, + }, + environmentParams: environmentParams{ + Carv2Reader: carv2Reader, + ShardActivationError: errors.New("some error"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealAwaitingPreCommit, deal.State) + require.Len(t, env.node.OnDealCompleteCalls, 1) + require.True(t, env.node.OnDealCompleteCalls[0].FastRetrieval) + require.True(t, deal.AvailableForRetrieval) + }, + }, + } + + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runHandoffDeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, 
data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestVerifyDealPrecommitted(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runVerifyDealActivated := makeExecutor(ctx, eventProcessor, providerstates.VerifyDealPreCommitted, storagemarket.StorageDealAwaitingPreCommit) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + nodeParams: nodeParams{ + PreCommittedSectorNumber: abi.SectorNumber(10), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealSealing, deal.State) + require.Equal(t, abi.SectorNumber(10), deal.SectorNumber) + }, + }, + "succeeds, active": { + nodeParams: nodeParams{ + PreCommittedIsActive: true, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFinalizing, deal.State) + }, + }, + "sync error": { + nodeParams: nodeParams{ + DealPreCommittedSyncError: errors.New("couldn't check deal pre-commit"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "error awaiting deal pre-commit: couldn't check deal pre-commit", deal.Message) + }, + }, + "async error": { + nodeParams: nodeParams{ + DealPreCommittedAsyncError: errors.New("deal did not appear on chain"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + 
require.Equal(t, "error awaiting deal pre-commit: deal did not appear on chain", deal.Message) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runVerifyDealActivated(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestVerifyDealActivated(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runVerifyDealActivated := makeExecutor(ctx, eventProcessor, providerstates.VerifyDealActivated, storagemarket.StorageDealSealing) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFinalizing, deal.State) + }, + }, + "sync error": { + nodeParams: nodeParams{ + DealCommittedSyncError: errors.New("couldn't check deal commitment"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "error activating deal: couldn't check deal commitment", deal.Message) + }, + }, + "async error": { + nodeParams: nodeParams{ + DealCommittedAsyncError: errors.New("deal did not appear on chain"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, "error activating deal: deal did not appear on chain", deal.Message) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t 
*testing.T) { + runVerifyDealActivated(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestCleanupDeal(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runCleanupDeal := makeExecutor(ctx, eventProcessor, providerstates.CleanupDeal, storagemarket.StorageDealFinalizing) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + dealParams: dealParams{ + PiecePath: defaultPath, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile}, + ExpectedDeletions: []filestore.Path{defaultPath}, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealActive, deal.State) + }, + }, + "succeeds w metadata": { + dealParams: dealParams{ + PiecePath: defaultPath, + MetadataPath: defaultMetadataPath, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile, defaultMetadataFile}, + ExpectedDeletions: []filestore.Path{defaultMetadataPath, defaultPath}, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealActive, deal.State) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runCleanupDeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestWaitForDealCompletion(t *testing.T) { + ctx := context.Background() + eventProcessor, err := 
fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runWaitForDealCompletion := makeExecutor(ctx, eventProcessor, providerstates.WaitForDealCompletion, storagemarket.StorageDealActive) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "slashing succeeds": { + nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(5)}, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealSlashed, deal.State) + require.Equal(t, abi.ChainEpoch(5), deal.SlashEpoch) + require.Len(t, env.peerTagger.UntagCalls, 1) + require.Equal(t, deal.Client, env.peerTagger.UntagCalls[0]) + }, + }, + "expiration succeeds": { + // OnDealSlashedEpoch of zero signals to test node to call onDealExpired() + nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(0)}, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealExpired, deal.State) + require.Len(t, env.peerTagger.UntagCalls, 1) + require.Equal(t, deal.Client, env.peerTagger.UntagCalls[0]) + }, + }, + "slashing fails": { + nodeParams: nodeParams{OnDealSlashedError: errors.New("an err")}, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + require.Equal(t, "error waiting for deal completion: deal slashing err: an err", deal.Message) + }, + }, + "expiration fails": { + nodeParams: nodeParams{OnDealExpiredError: errors.New("an err")}, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, 
storagemarket.StorageDealError, deal.State) + require.Equal(t, "error waiting for deal completion: deal expiration err: an err", deal.Message) + }, + }, + "fails synchronously": { + nodeParams: nodeParams{WaitForDealCompletionError: errors.New("an err")}, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + require.Equal(t, "error waiting for deal completion: an err", deal.Message) + }, + }, + } + + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runWaitForDealCompletion(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestRejectDeal(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runRejectDeal := makeExecutor(ctx, eventProcessor, providerstates.RejectDeal, storagemarket.StorageDealRejecting) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, 1, env.disconnectCalls) + }, + }, + "fails if it cannot send a response": { + environmentParams: environmentParams{ + SendSignedResponseError: xerrors.New("error sending response"), + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) + require.Equal(t, deal.Message, "sending response to deal: error sending response") + }, + 
}, + } + for test, data := range tests { + t.Run(test, func(t *testing.T) { + runRejectDeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +func TestFailDeal(t *testing.T) { + ctx := context.Background() + eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) + require.NoError(t, err) + runFailDeal := makeExecutor(ctx, eventProcessor, providerstates.FailDeal, storagemarket.StorageDealFailing) + tests := map[string]struct { + nodeParams nodeParams + dealParams dealParams + environmentParams environmentParams + fileStoreParams tut.TestFileStoreParams + pieceStoreParams tut.TestPieceStoreParams + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) + }{ + "succeeds": { + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + }, + }, + "succeeds, funds released": { + dealParams: dealParams{ + ReserveFunds: true, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + assert.Equal(t, env.node.DealFunds.ReleaseCalls[0], deal.Proposal.ProviderBalanceRequirement()) + assert.True(t, deal.FundsReserved.Nil() || deal.FundsReserved.IsZero()) + }, + }, + "succeeds, file deletions": { + dealParams: dealParams{ + PiecePath: defaultPath, + MetadataPath: defaultMetadataPath, + }, + fileStoreParams: tut.TestFileStoreParams{ + Files: []filestore.File{defaultDataFile, defaultMetadataFile}, + ExpectedDeletions: []filestore.Path{defaultPath, defaultMetadataPath}, + }, + dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { + tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) + }, + }, + } + for test, data := range tests { + t.Run(test, func(t 
*testing.T) { + runFailDeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) + }) + } +} + +// all of these default parameters are setup to allow a deal to complete each handler with no errors +var defaultHeight = abi.ChainEpoch(50) +var defaultTipSetToken = []byte{1, 2, 3} +var defaultStoragePricePerEpoch = abi.NewTokenAmount(10000) +var defaultPieceSize = abi.PaddedPieceSize(1048576) +var defaultStartEpoch = abi.ChainEpoch(200) +var defaultEndEpoch = defaultStartEpoch + ((24*3600)/30)*200 // 200 days + +var defaultPieceCid = mkPieceCid("piece cid") +var defaultPath = filestore.Path("file.txt") +var defaultMetadataPath = filestore.Path("metadataPath.txt") +var defaultClientAddress = address.TestAddress +var defaultProviderAddress = address.TestAddress2 +var defaultMinerAddr, _ = address.NewActorAddress([]byte("miner")) +var defaultClientCollateral = abi.NewTokenAmount(0) +var defaultProviderCollateral = abi.NewTokenAmount(10000) +var defaultDataRef = storagemarket.DataRef{ + Root: tut.GenerateCids(1)[0], + TransferType: storagemarket.TTGraphsync, +} +var defaultClientMarketBalance = big.Mul(big.NewInt(int64(defaultEndEpoch-defaultStartEpoch)), defaultStoragePricePerEpoch) + +var defaultAsk = storagemarket.StorageAsk{ + Price: abi.NewTokenAmount(10000000), + VerifiedPrice: abi.NewTokenAmount(1000000), + MinPieceSize: abi.PaddedPieceSize(256), + MaxPieceSize: 1 << 20, +} + +var testData = tut.NewTestIPLDTree() +var dataBuf = new(bytes.Buffer) +var blockLocationBuf = new(bytes.Buffer) +var _ error = testData.DumpToCar(dataBuf, blockrecorder.RecordEachBlockTo(blockLocationBuf)) +var defaultDataFile = tut.NewTestFile(tut.TestFileParams{ + Buffer: dataBuf, + Path: defaultPath, + Size: 400, +}) +var defaultMetadataFile = tut.NewTestFile(tut.TestFileParams{ + Buffer: blockLocationBuf, + Path: defaultMetadataPath, + Size: 400, +}) + +func mkPieceCid(input string) cid.Cid { + var prefix = 
cid.Prefix{ + Version: 1, + Codec: cid.FilCommitmentUnsealed, + MhType: mh.SHA2_256_TRUNC254_PADDED, + MhLength: 32, + } + + data := []byte(input) + + c, err := prefix.Sum(data) + switch err { + case mh.ErrSumNotSupported: + // multihash library doesn't support this hash function. + // just fake it. + case nil: + return c + default: + //panic(err) + } + + sum := sha256.Sum256(data) + hash, err := mh.Encode(sum[:], prefix.MhType) + if err != nil { + panic(err) + } + return cid.NewCidV1(prefix.Codec, hash) +} + +func generatePublishDealsReturn(t *testing.T) (abi.DealID, []byte) { + dealId := abi.DealID(rand.Uint64()) + + psdReturn := market.PublishStorageDealsReturn{IDs: []abi.DealID{dealId}} + psdReturnBytes := bytes.NewBuffer([]byte{}) + err := psdReturn.MarshalCBOR(psdReturnBytes) + require.NoError(t, err) + + return dealId, psdReturnBytes.Bytes() +} + +type nodeParams struct { + MinerAddr address.Address + MinerWorkerError error + ReserveFundsError error + Height abi.ChainEpoch + TipSetToken shared.TipSetToken + ClientMarketBalance abi.TokenAmount + ClientMarketBalanceError error + AddFundsCid cid.Cid + VerifySignatureFails bool + MostRecentStateIDError error + PieceLength uint64 + PieceSectorID uint64 + PublishDealsError error + PublishDealID abi.DealID + WaitForPublishDealsError error + OnDealCompleteError error + PreCommittedSectorNumber abi.SectorNumber + PreCommittedIsActive bool + DealPreCommittedSyncError error + DealPreCommittedAsyncError error + DealCommittedSyncError error + DealCommittedAsyncError error + WaitForMessageBlocks bool + WaitForMessagePublishCid cid.Cid + WaitForMessageError error + WaitForMessageExitCode exitcode.ExitCode + WaitForMessageRetBytes []byte + WaitForDealCompletionError error + OnDealExpiredError error + OnDealSlashedError error + OnDealSlashedEpoch abi.ChainEpoch + DataCap *verifreg.DataCap + GetDataCapError error +} + +type dealParams struct { + PieceCid *cid.Cid + PiecePath filestore.Path + MetadataPath filestore.Path + 
DealID abi.DealID + DataRef *storagemarket.DataRef + StoragePricePerEpoch abi.TokenAmount + ProviderCollateral abi.TokenAmount + ClientCollateral abi.TokenAmount + PieceSize abi.PaddedPieceSize + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + FastRetrieval bool + VerifiedDeal bool + ReserveFunds bool + TransferChannelId *datatransfer.ChannelID + Label market.DealLabel +} + +type environmentParams struct { + Address address.Address + Ask storagemarket.StorageAsk + DataTransferError error + PieceCid cid.Cid + MetadataPath filestore.Path + GenerateCommPError error + PieceReader io.ReadCloser + PieceSize uint64 + SendSignedResponseError error + DisconnectError error + TagsProposal bool + RejectDeal bool + RejectReason string + DecisionError error + RestartDataTransferError error + AwaitRestartTimeout chan time.Time + FinalizeBlockstoreError error + + Carv2Reader *carv2.Reader + Carv2Error error + + ShardActivationError error +} + +type executor func(t *testing.T, + node nodeParams, + params environmentParams, + dealParams dealParams, + fileStoreParams tut.TestFileStoreParams, + pieceStoreParams tut.TestPieceStoreParams, + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)) + +func makeExecutor(ctx context.Context, + eventProcessor fsm.EventProcessor, + stateEntryFunc providerstates.ProviderStateEntryFunc, + initialState storagemarket.StorageDealStatus) executor { + return func(t *testing.T, + nodeParams nodeParams, + params environmentParams, + dealParams dealParams, + fileStoreParams tut.TestFileStoreParams, + pieceStoreParams tut.TestPieceStoreParams, + dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)) { + + smstate := testnodes.NewStorageMarketState() + if nodeParams.Height != abi.ChainEpoch(0) { + smstate.Epoch = nodeParams.Height + smstate.TipSetToken = nodeParams.TipSetToken + } else { + smstate.Epoch = defaultHeight + smstate.TipSetToken = defaultTipSetToken + } + if 
!nodeParams.ClientMarketBalance.Nil() { + smstate.AddFunds(defaultClientAddress, nodeParams.ClientMarketBalance) + } else { + smstate.AddFunds(defaultClientAddress, defaultClientMarketBalance) + } + + common := testnodes.FakeCommonNode{ + SMState: smstate, + DealFunds: tut.NewTestDealFunds(), + GetChainHeadError: nodeParams.MostRecentStateIDError, + GetBalanceError: nodeParams.ClientMarketBalanceError, + VerifySignatureFails: nodeParams.VerifySignatureFails, + ReserveFundsError: nodeParams.ReserveFundsError, + PreCommittedIsActive: nodeParams.PreCommittedIsActive, + PreCommittedSectorNumber: nodeParams.PreCommittedSectorNumber, + DealPreCommittedSyncError: nodeParams.DealPreCommittedSyncError, + DealPreCommittedAsyncError: nodeParams.DealPreCommittedAsyncError, + DealCommittedSyncError: nodeParams.DealCommittedSyncError, + DealCommittedAsyncError: nodeParams.DealCommittedAsyncError, + AddFundsCid: nodeParams.AddFundsCid, + WaitForMessageBlocks: nodeParams.WaitForMessageBlocks, + WaitForMessageError: nodeParams.WaitForMessageError, + WaitForMessageFinalCid: nodeParams.WaitForMessagePublishCid, + WaitForMessageExitCode: nodeParams.WaitForMessageExitCode, + WaitForMessageRetBytes: nodeParams.WaitForMessageRetBytes, + WaitForDealCompletionError: nodeParams.WaitForDealCompletionError, + OnDealExpiredError: nodeParams.OnDealExpiredError, + OnDealSlashedError: nodeParams.OnDealSlashedError, + OnDealSlashedEpoch: nodeParams.OnDealSlashedEpoch, + } + + node := &testnodes.FakeProviderNode{ + FakeCommonNode: common, + MinerAddr: nodeParams.MinerAddr, + MinerWorkerError: nodeParams.MinerWorkerError, + PieceLength: nodeParams.PieceLength, + PieceSectorID: nodeParams.PieceSectorID, + PublishDealsError: nodeParams.PublishDealsError, + PublishDealID: nodeParams.PublishDealID, + WaitForPublishDealsError: nodeParams.WaitForPublishDealsError, + OnDealCompleteError: nodeParams.OnDealCompleteError, + OnDealCompleteSkipCommP: true, + DataCap: nodeParams.DataCap, + GetDataCapErr: 
nodeParams.GetDataCapError, + } + + if nodeParams.MinerAddr == address.Undef { + node.MinerAddr = defaultMinerAddr + } + + proposal := market.DealProposal{ + PieceCID: defaultPieceCid, + PieceSize: defaultPieceSize, + Client: defaultClientAddress, + Provider: defaultProviderAddress, + StartEpoch: defaultStartEpoch, + EndEpoch: defaultEndEpoch, + StoragePricePerEpoch: defaultStoragePricePerEpoch, + ProviderCollateral: defaultProviderCollateral, + ClientCollateral: defaultClientCollateral, + Label: dealParams.Label, + } + if dealParams.PieceCid != nil { + proposal.PieceCID = *dealParams.PieceCid + } + if !dealParams.StoragePricePerEpoch.Nil() { + proposal.StoragePricePerEpoch = dealParams.StoragePricePerEpoch + } + if !dealParams.ProviderCollateral.Nil() { + proposal.ProviderCollateral = dealParams.ProviderCollateral + } + if !dealParams.ClientCollateral.Nil() { + proposal.ClientCollateral = dealParams.ClientCollateral + } + if dealParams.StartEpoch != abi.ChainEpoch(0) { + proposal.StartEpoch = dealParams.StartEpoch + } + if dealParams.EndEpoch != abi.ChainEpoch(0) { + proposal.EndEpoch = dealParams.EndEpoch + } + if dealParams.PieceSize != abi.PaddedPieceSize(0) { + proposal.PieceSize = dealParams.PieceSize + } + proposal.VerifiedDeal = dealParams.VerifiedDeal + signedProposal := &market.ClientDealProposal{ + Proposal: proposal, + ClientSignature: *tut.MakeTestSignature(), + } + dataRef := &defaultDataRef + if dealParams.DataRef != nil { + dataRef = dealParams.DataRef + } + dealState, err := tut.MakeTestMinerDeal(initialState, + signedProposal, dataRef) + require.NoError(t, err) + dealState.AddFundsCid = &tut.GenerateCids(1)[0] + dealState.PublishCid = &tut.GenerateCids(1)[0] + if dealParams.PiecePath != filestore.Path("") { + dealState.PiecePath = dealParams.PiecePath + } + if dealParams.MetadataPath != filestore.Path("") { + dealState.MetadataPath = dealParams.MetadataPath + } + if dealParams.DealID != abi.DealID(0) { + dealState.DealID = dealParams.DealID + } + 
dealState.FastRetrieval = dealParams.FastRetrieval + if dealParams.ReserveFunds { + dealState.FundsReserved = proposal.ProviderCollateral + } + if dealParams.TransferChannelId != nil { + dealState.TransferChannelId = dealParams.TransferChannelId + } + + fs := tut.NewTestFileStore(fileStoreParams) + pieceStore := tut.NewTestPieceStoreWithParams(pieceStoreParams) + expectedTags := make(map[string]struct{}) + if params.TagsProposal { + expectedTags[dealState.ProposalCid.String()] = struct{}{} + } + environment := &fakeEnvironment{ + expectedTags: expectedTags, + receivedTags: make(map[string]struct{}), + address: params.Address, + node: node, + ask: params.Ask, + dataTransferError: params.DataTransferError, + pieceCid: params.PieceCid, + metadataPath: params.MetadataPath, + generateCommPError: params.GenerateCommPError, + pieceReader: params.PieceReader, + pieceSize: params.PieceSize, + sendSignedResponseError: params.SendSignedResponseError, + disconnectError: params.DisconnectError, + rejectDeal: params.RejectDeal, + rejectReason: params.RejectReason, + decisionError: params.DecisionError, + fs: fs, + pieceStore: pieceStore, + peerTagger: tut.NewTestPeerTagger(), + + restartDataTransferError: params.RestartDataTransferError, + + finalizeBlockstoreErr: params.FinalizeBlockstoreError, + + carV2Reader: params.Carv2Reader, + carV2Error: params.Carv2Error, + shardActivationError: params.ShardActivationError, + awaitRestartTimeout: params.AwaitRestartTimeout, + } + if environment.pieceCid == cid.Undef { + environment.pieceCid = defaultPieceCid + } + if environment.metadataPath == filestore.Path("") { + environment.metadataPath = defaultMetadataPath + } + if environment.address == address.Undef { + environment.address = defaultProviderAddress + } + if environment.ask == storagemarket.StorageAskUndefined { + environment.ask = defaultAsk + } + if environment.pieceSize == 0 { + environment.pieceSize = uint64(defaultPieceSize.Unpadded()) + } + if environment.pieceReader == nil 
{ + environment.pieceReader = newStubbedReadCloser(nil) + } + + fsmCtx := fsmtest.NewTestContext(ctx, eventProcessor) + err = stateEntryFunc(fsmCtx, environment, *dealState) + require.NoError(t, err) + if environment.awaitRestartTimeout != nil { + environment.awaitRestartTimeout <- time.Now() + time.Sleep(10 * time.Millisecond) + } + fsmCtx.ReplayEvents(t, dealState) + dealInspector(t, *dealState, environment) + + fs.VerifyExpectations(t) + pieceStore.VerifyExpectations(t) + environment.VerifyExpectations(t) + } +} + +type restartDataTransferCall struct { + chId datatransfer.ChannelID +} + +type fakeEnvironment struct { + address address.Address + node *testnodes.FakeProviderNode + ask storagemarket.StorageAsk + dataTransferError error + pieceCid cid.Cid + metadataPath filestore.Path + generateCommPError error + pieceReader io.ReadCloser + pieceSize uint64 + sendSignedResponseError error + disconnectCalls int + disconnectError error + rejectDeal bool + rejectReason string + decisionError error + fs filestore.FileStore + pieceStore piecestore.PieceStore + expectedTags map[string]struct{} + receivedTags map[string]struct{} + peerTagger *tut.TestPeerTagger + + finalizeBlockstoreErr error + + restartDataTransferCalls []restartDataTransferCall + restartDataTransferError error + + carV2Reader *carv2.Reader + carV2Error error + awaitRestartTimeout chan time.Time + shardActivationError error +} + +func (fe *fakeEnvironment) RemoveIndex(ctx context.Context, proposalCid cid.Cid) error { + return nil +} + +func (fe *fakeEnvironment) RestartDataTransfer(_ context.Context, chId datatransfer.ChannelID) error { + fe.restartDataTransferCalls = append(fe.restartDataTransferCalls, restartDataTransferCall{chId}) + return fe.restartDataTransferError +} + +func (fe *fakeEnvironment) Address() address.Address { + return fe.address +} + +func (fe *fakeEnvironment) Node() storagemarket.StorageProviderNode { + return fe.node +} + +func (fe *fakeEnvironment) Ask() storagemarket.StorageAsk { 
+ return fe.ask +} + +func (fe *fakeEnvironment) SendSignedResponse(ctx context.Context, response *network.Response) error { + return fe.sendSignedResponseError +} + +func (fe *fakeEnvironment) VerifyExpectations(t *testing.T) { + require.Equal(t, fe.expectedTags, fe.receivedTags) +} + +func (fe *fakeEnvironment) Disconnect(proposalCid cid.Cid) error { + fe.disconnectCalls += 1 + return fe.disconnectError +} + +func (fe *fakeEnvironment) FileStore() filestore.FileStore { + return fe.fs +} + +func (fe *fakeEnvironment) PieceStore() piecestore.PieceStore { + return fe.pieceStore +} + +func (fe *fakeEnvironment) RunCustomDecisionLogic(context.Context, storagemarket.MinerDeal) (bool, string, error) { + return !fe.rejectDeal, fe.rejectReason, fe.decisionError +} + +func (fe *fakeEnvironment) TagPeer(id peer.ID, s string) { + fe.peerTagger.TagPeer(id, s) +} + +func (fe *fakeEnvironment) UntagPeer(id peer.ID, s string) { + fe.peerTagger.UntagPeer(id, s) +} + +func (fe *fakeEnvironment) RegisterShard(ctx context.Context, pieceCid cid.Cid, path string, eagerInit bool) error { + return fe.shardActivationError +} + +func (fe *fakeEnvironment) TerminateBlockstore(proposalCid cid.Cid, carFilePath string) error { + return nil +} + +func (fe *fakeEnvironment) GeneratePieceCommitment(proposalCid cid.Cid, _ string, dealSize abi.PaddedPieceSize) (cid.Cid, filestore.Path, error) { + return fe.pieceCid, fe.metadataPath, fe.generateCommPError +} + +func (fe *fakeEnvironment) FinalizeBlockstore(proposalCid cid.Cid) error { + return fe.finalizeBlockstoreErr +} + +func (fe *fakeEnvironment) ReadCAR(_ string) (*carv2.Reader, error) { + return fe.carV2Reader, fe.carV2Error +} + +func (fe *fakeEnvironment) AwaitRestartTimeout() <-chan time.Time { + return fe.awaitRestartTimeout +} + +func (fe *fakeEnvironment) AnnounceIndex(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { + return cid.Undef, nil +} + +var _ providerstates.ProviderDealEnvironment = &fakeEnvironment{} + 
+type stubbedReadCloser struct { + err error +} + +func (src *stubbedReadCloser) Read(p []byte) (n int, err error) { + return 0, io.EOF +} + +func (src *stubbedReadCloser) Close() error { + return src.err +} + +func newStubbedReadCloser(err error) io.ReadCloser { + return &stubbedReadCloser{err} +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerutils/providerutils.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerutils/providerutils.go new file mode 100644 index 00000000000..6e44d63186f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerutils/providerutils.go @@ -0,0 +1,90 @@ +// Package providerutils provides utility functions for the storage provider & provider FSM +package providerutils + +import ( + "context" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder" +) + +// VerifyFunc is a function that can validate a signature for a given address and bytes +type VerifyFunc func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) + +// VerifyProposal verifies the signature on the given signed proposal matches +// the client addres for the proposal, using the given signature verification function +func VerifyProposal(ctx context.Context, sdp market.ClientDealProposal, tok shared.TipSetToken, verifier VerifyFunc) error { + b, err := cborutil.Dump(&sdp.Proposal) + if err != nil { + return err + } + + return VerifySignature(ctx, sdp.ClientSignature, sdp.Proposal.Client, b, tok, 
verifier) +} + +// VerifySignature verifies the signature over the given bytes +func VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, buf []byte, tok shared.TipSetToken, verifier VerifyFunc) error { + verified, err := verifier(ctx, signature, signer, buf, tok) + if err != nil { + return xerrors.Errorf("verifying: %w", err) + } + + if !verified { + return xerrors.New("could not verify signature") + } + + return nil +} + +// WorkerLookupFunc is a function that can lookup a miner worker address from a storage miner actor +type WorkerLookupFunc func(context.Context, address.Address, shared.TipSetToken) (address.Address, error) + +// SignFunc is a function that can sign a set of bytes with a given address +type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error) + +// SignMinerData signs the given data structure with a signature for the given address +func SignMinerData(ctx context.Context, data interface{}, address address.Address, tok shared.TipSetToken, workerLookup WorkerLookupFunc, sign SignFunc) (*crypto.Signature, error) { + msg, err := cborutil.Dump(data) + if err != nil { + return nil, xerrors.Errorf("serializing: %w", err) + } + + worker, err := workerLookup(ctx, address, tok) + if err != nil { + return nil, err + } + + sig, err := sign(ctx, worker, msg) + if err != nil { + return nil, xerrors.Errorf("failed to sign: %w", err) + } + return sig, nil +} + +// LoadBlockLocations loads a metadata file then converts it to a map of cid -> blockLocation +func LoadBlockLocations(fs filestore.FileStore, metadataPath filestore.Path) (map[cid.Cid]piecestore.BlockLocation, error) { + metadataFile, err := fs.Open(metadataPath) + if err != nil { + return nil, err + } + metadata, err := blockrecorder.ReadBlockMetadata(metadataFile) + _ = metadataFile.Close() + if err != nil { + return nil, err + } + blockLocations := make(map[cid.Cid]piecestore.BlockLocation, len(metadata)) + for _, metadatum := range 
metadata { + blockLocations[metadatum.CID] = piecestore.BlockLocation{RelOffset: metadatum.Offset, BlockSize: metadatum.Size} + } + return blockLocations, nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerutils/providerutils_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerutils/providerutils_test.go new file mode 100644 index 00000000000..be7e16ba936 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/providerutils/providerutils_test.go @@ -0,0 +1,180 @@ +package providerutils_test + +import ( + "bytes" + "context" + "errors" + "math/rand" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +func TestVerifyProposal(t *testing.T) { + tests := map[string]struct { + proposal market.ClientDealProposal + verifier providerutils.VerifyFunc + shouldErr bool + }{ + "successful verification": { + proposal: *shared_testutil.MakeTestClientDealProposal(), + verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { + return true, nil + }, + shouldErr: false, + }, + "bad proposal": { + proposal: market.ClientDealProposal{ + Proposal: market.DealProposal{}, + ClientSignature: *shared_testutil.MakeTestSignature(), + }, + verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { + return true, nil + }, 
+ shouldErr: true, + }, + "verification fails": { + proposal: *shared_testutil.MakeTestClientDealProposal(), + verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { + return false, nil + }, + shouldErr: true, + }, + } + for name, data := range tests { + t.Run(name, func(t *testing.T) { + err := providerutils.VerifyProposal(context.Background(), data.proposal, shared.TipSetToken{}, data.verifier) + require.Equal(t, err != nil, data.shouldErr) + }) + } +} + +func TestSignMinerData(t *testing.T) { + ctx := context.Background() + successLookup := func(context.Context, address.Address, shared.TipSetToken) (address.Address, error) { + return address.TestAddress2, nil + } + successSign := func(context.Context, address.Address, []byte) (*crypto.Signature, error) { + return shared_testutil.MakeTestSignature(), nil + } + tests := map[string]struct { + data interface{} + workerLookup providerutils.WorkerLookupFunc + signBytes providerutils.SignFunc + shouldErr bool + }{ + "succeeds": { + data: shared_testutil.MakeTestStorageAsk(), + workerLookup: successLookup, + signBytes: successSign, + shouldErr: false, + }, + "cbor dump errors": { + data: &network.Response{}, + workerLookup: successLookup, + signBytes: successSign, + shouldErr: true, + }, + "worker lookup errors": { + data: shared_testutil.MakeTestStorageAsk(), + workerLookup: func(context.Context, address.Address, shared.TipSetToken) (address.Address, error) { + return address.Undef, errors.New("Something went wrong") + }, + signBytes: successSign, + shouldErr: true, + }, + "signing errors": { + data: shared_testutil.MakeTestStorageAsk(), + workerLookup: successLookup, + signBytes: func(context.Context, address.Address, []byte) (*crypto.Signature, error) { + return nil, errors.New("something went wrong") + }, + shouldErr: true, + }, + } + for name, data := range tests { + t.Run(name, func(t *testing.T) { + _, err := providerutils.SignMinerData(ctx, data.data, 
address.TestAddress, shared.TipSetToken{}, data.workerLookup, data.signBytes) + require.Equal(t, err != nil, data.shouldErr) + }) + } +} + +func TestLoadBlockLocations(t *testing.T) { + testData := shared_testutil.NewTestIPLDTree() + + carBuf := new(bytes.Buffer) + blockLocationBuf := new(bytes.Buffer) + err := testData.DumpToCar(carBuf, blockrecorder.RecordEachBlockTo(blockLocationBuf)) + require.NoError(t, err) + validPath := filestore.Path("valid.data") + validFile := shared_testutil.NewTestFile(shared_testutil.TestFileParams{ + Buffer: blockLocationBuf, + Path: validPath, + }) + missingPath := filestore.Path("missing.data") + invalidPath := filestore.Path("invalid.data") + invalidData := make([]byte, 512) + _, _ = rand.Read(invalidData) + invalidFile := shared_testutil.NewTestFile(shared_testutil.TestFileParams{ + Buffer: bytes.NewBuffer(invalidData), + Path: invalidPath, + }) + fs := shared_testutil.NewTestFileStore(shared_testutil.TestFileStoreParams{ + Files: []filestore.File{validFile, invalidFile}, + ExpectedOpens: []filestore.Path{validPath, invalidPath}, + }) + testCases := map[string]struct { + path filestore.Path + shouldErr bool + expectedCids []cid.Cid + }{ + "valid data": { + path: validPath, + shouldErr: false, + expectedCids: []cid.Cid{ + testData.LeafAlphaBlock.Cid(), + testData.LeafBetaBlock.Cid(), + testData.MiddleListBlock.Cid(), + testData.MiddleMapBlock.Cid(), + testData.RootBlock.Cid(), + }, + }, + "missing data": { + path: missingPath, + shouldErr: true, + }, + "invalid data": { + path: invalidPath, + shouldErr: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + results, err := providerutils.LoadBlockLocations(fs, data.path) + if data.shouldErr { + require.Error(t, err) + require.Nil(t, results) + } else { + require.NoError(t, err) + for _, c := range data.expectedCids { + _, ok := results[c] + require.True(t, ok) + } + } + }) + } + fs.VerifyExpectations(t) +} diff --git 
a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/common.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/common.go new file mode 100644 index 00000000000..cc5f010ce48 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/common.go @@ -0,0 +1,78 @@ +package requestvalidation + +import ( + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// ValidatePush validates a push request received from the peer that will send data +// Will succeed only if: +// - voucher has correct type +// - voucher references an active deal +// - referenced deal matches the given base CID +// - referenced deal is in an acceptable state +func ValidatePush( + deals PushDeals, + sender peer.ID, + voucher datatransfer.Voucher, + baseCid cid.Cid, + Selector ipld.Node) error { + dealVoucher, ok := voucher.(*StorageDataTransferVoucher) + if !ok { + return xerrors.Errorf("voucher type %s: %w", voucher.Type(), ErrWrongVoucherType) + } + + var deal storagemarket.MinerDeal + deal, err := deals.Get(dealVoucher.Proposal) + if err != nil { + return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) + } + + if !deal.Ref.Root.Equals(baseCid) { + return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", deal.Proposal.PieceCID.String(), baseCid.String(), ErrWrongPiece) + } + for _, state := range DataTransferStates { + if deal.State == state { + return nil + } + } + return xerrors.Errorf("Deal State %s: %w", storagemarket.DealStates[deal.State], ErrInacceptableDealState) +} + +// ValidatePull validates a pull request received from the peer that will receive data +// Will succeed only if: +// - voucher has correct type +// - voucher references an active deal +// - 
referenced deal matches the given base CID +// - referenced deal is in an acceptable state +func ValidatePull( + deals PullDeals, + receiver peer.ID, + voucher datatransfer.Voucher, + baseCid cid.Cid, + Selector ipld.Node) error { + dealVoucher, ok := voucher.(*StorageDataTransferVoucher) + if !ok { + return xerrors.Errorf("voucher type %s: %w", voucher.Type(), ErrWrongVoucherType) + } + deal, err := deals.Get(dealVoucher.Proposal) + if err != nil { + return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) + } + + if !deal.DataRef.Root.Equals(baseCid) { + return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", deal.Proposal.PieceCID.String(), baseCid.String(), ErrWrongPiece) + } + for _, state := range DataTransferStates { + if deal.State == state { + return nil + } + } + return xerrors.Errorf("Deal State %s: %w", deal.State, ErrInacceptableDealState) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/doc.go new file mode 100644 index 00000000000..46d70bd902f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/doc.go @@ -0,0 +1,3 @@ +// Package requestvalidation implements a request validator for the data transfer module +// to validate data transfer requests for storage deals +package requestvalidation diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/request_validation_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/request_validation_test.go new file mode 100644 index 00000000000..9753e32499d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/request_validation_test.go @@ -0,0 +1,278 @@ +package requestvalidation_test + +import ( + "math/rand" + "testing" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + 
"github.com/ipfs/go-datastore/namespace" + dss "github.com/ipfs/go-datastore/sync" + blocksutil "github.com/ipfs/go-ipfs-blocksutil" + "github.com/libp2p/go-libp2p-core/peer" + xerrors "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-statestore" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + rv "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" +) + +var blockGenerator = blocksutil.NewBlockGenerator() + +type wrongDTType struct { +} + +func (wrongDTType) Type() datatransfer.TypeIdentifier { + return "WrongDTTYPE" +} + +func uniqueStorageDealProposal() (market.ClientDealProposal, error) { + clientAddr, err := address.NewIDAddress(uint64(rand.Int())) + if err != nil { + return market.ClientDealProposal{}, err + } + providerAddr, err := address.NewIDAddress(uint64(rand.Int())) + if err != nil { + return market.ClientDealProposal{}, err + } + return market.ClientDealProposal{ + Proposal: market.DealProposal{ + PieceCID: blockGenerator.Next().Cid(), + Client: clientAddr, + Provider: providerAddr, + }, + ClientSignature: crypto.Signature{ + Data: []byte("foo bar cat dog"), + Type: crypto.SigTypeBLS, + }, + }, nil +} + +func newClientDeal(minerID peer.ID, state storagemarket.StorageDealStatus) (storagemarket.ClientDeal, error) { + newProposal, err := uniqueStorageDealProposal() + if err != nil { + return storagemarket.ClientDeal{}, err + } + proposalNd, err := cborutil.AsIpld(&newProposal) + if err != nil { + return storagemarket.ClientDeal{}, err + } + minerAddr, err := address.NewIDAddress(uint64(rand.Int())) + if err != nil { + return storagemarket.ClientDeal{}, err + } + + return storagemarket.ClientDeal{ + ClientDealProposal: 
newProposal, + ProposalCid: proposalNd.Cid(), + DataRef: &storagemarket.DataRef{ + Root: blockGenerator.Next().Cid(), + }, + Miner: minerID, + MinerWorker: minerAddr, + State: state, + }, nil +} + +func newMinerDeal(clientID peer.ID, state storagemarket.StorageDealStatus) (storagemarket.MinerDeal, error) { + newProposal, err := uniqueStorageDealProposal() + if err != nil { + return storagemarket.MinerDeal{}, err + } + proposalNd, err := cborutil.AsIpld(&newProposal) + if err != nil { + return storagemarket.MinerDeal{}, err + } + ref := blockGenerator.Next().Cid() + + return storagemarket.MinerDeal{ + ClientDealProposal: newProposal, + ProposalCid: proposalNd.Cid(), + Client: clientID, + State: state, + Ref: &storagemarket.DataRef{Root: ref}, + }, nil +} + +type pushDeals struct { + state *statestore.StateStore +} + +func (pd *pushDeals) Get(proposalCid cid.Cid) (storagemarket.MinerDeal, error) { + var deal storagemarket.MinerDeal + err := pd.state.Get(proposalCid).Get(&deal) + return deal, err +} + +type pullDeals struct { + state *statestore.StateStore +} + +func (pd *pullDeals) Get(proposalCid cid.Cid) (storagemarket.ClientDeal, error) { + var deal storagemarket.ClientDeal + err := pd.state.Get(proposalCid).Get(&deal) + return deal, err +} + +func TestUnifiedRequestValidator(t *testing.T) { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + state := statestore.New(namespace.Wrap(ds, datastore.NewKey("/deals/client"))) + minerID := peer.ID("fakepeerid") + clientID := peer.ID("fakepeerid2") + block := blockGenerator.Next() + + t.Run("which only accepts pulls", func(t *testing.T) { + urv := rv.NewUnifiedRequestValidator(nil, &pullDeals{state}) + + t.Run("ValidatePush fails", func(t *testing.T) { + _, err := urv.ValidatePush(false, datatransfer.ChannelID{}, minerID, wrongDTType{}, block.Cid(), nil) + if !xerrors.Is(err, rv.ErrNoPushAccepted) { + t.Fatal("Push should fail for the client request validator for storage deals") + } + }) + + AssertValidatesPulls(t, urv, 
minerID, state) + }) + + t.Run("which only accepts pushes", func(t *testing.T) { + urv := rv.NewUnifiedRequestValidator(&pushDeals{state}, nil) + + t.Run("ValidatePull fails", func(t *testing.T) { + _, err := urv.ValidatePull(false, datatransfer.ChannelID{}, clientID, wrongDTType{}, block.Cid(), nil) + if !xerrors.Is(err, rv.ErrNoPullAccepted) { + t.Fatal("Pull should fail for the provider request validator for storage deals") + } + }) + + AssertPushValidator(t, urv, clientID, state) + }) + + t.Run("which accepts pushes and pulls", func(t *testing.T) { + urv := rv.NewUnifiedRequestValidator(&pushDeals{state}, &pullDeals{state}) + + AssertValidatesPulls(t, urv, minerID, state) + AssertPushValidator(t, urv, clientID, state) + }) +} + +func AssertPushValidator(t *testing.T, validator datatransfer.RequestValidator, sender peer.ID, state *statestore.StateStore) { + t.Run("ValidatePush fails deal not found", func(t *testing.T) { + proposal, err := uniqueStorageDealProposal() + if err != nil { + t.Fatal("error creating proposal") + } + proposalNd, err := cborutil.AsIpld(&proposal) + if err != nil { + t.Fatal("error serializing proposal") + } + _, err = validator.ValidatePush(false, datatransfer.ChannelID{}, sender, &rv.StorageDataTransferVoucher{proposalNd.Cid()}, proposal.Proposal.PieceCID, nil) + if !xerrors.Is(err, rv.ErrNoDeal) { + t.Fatal("Push should fail if there is no deal stored") + } + }) + t.Run("ValidatePush fails wrong piece ref", func(t *testing.T) { + minerDeal, err := newMinerDeal(sender, storagemarket.StorageDealProposalAccepted) + if err != nil { + t.Fatal("error creating client deal") + } + if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { + t.Fatal("deal tracking failed") + } + _, err = validator.ValidatePush(false, datatransfer.ChannelID{}, sender, &rv.StorageDataTransferVoucher{minerDeal.ProposalCid}, blockGenerator.Next().Cid(), nil) + if !xerrors.Is(err, rv.ErrWrongPiece) { + t.Fatal("Push should fail if piece ref is 
incorrect") + } + }) + t.Run("ValidatePush fails wrong deal state", func(t *testing.T) { + minerDeal, err := newMinerDeal(sender, storagemarket.StorageDealActive) + if err != nil { + t.Fatal("error creating client deal") + } + if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { + t.Fatal("deal tracking failed") + } + ref := minerDeal.Ref + _, err = validator.ValidatePush(false, datatransfer.ChannelID{}, sender, &rv.StorageDataTransferVoucher{minerDeal.ProposalCid}, ref.Root, nil) + if !xerrors.Is(err, rv.ErrInacceptableDealState) { + t.Fatal("Push should fail if deal is in a state that cannot be data transferred") + } + }) + t.Run("ValidatePush succeeds", func(t *testing.T) { + minerDeal, err := newMinerDeal(sender, storagemarket.StorageDealValidating) + if err != nil { + t.Fatal("error creating client deal") + } + if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { + t.Fatal("deal tracking failed") + } + ref := minerDeal.Ref + _, err = validator.ValidatePush(false, datatransfer.ChannelID{}, sender, &rv.StorageDataTransferVoucher{minerDeal.ProposalCid}, ref.Root, nil) + if err != nil { + t.Fatal("Push should should succeed when all parameters are correct") + } + }) +} + +func AssertValidatesPulls(t *testing.T, validator datatransfer.RequestValidator, receiver peer.ID, state *statestore.StateStore) { + t.Run("ValidatePull fails deal not found", func(t *testing.T) { + proposal, err := uniqueStorageDealProposal() + if err != nil { + t.Fatal("error creating proposal") + } + proposalNd, err := cborutil.AsIpld(&proposal) + if err != nil { + t.Fatal("error serializing proposal") + } + _, err = validator.ValidatePull(false, datatransfer.ChannelID{}, receiver, &rv.StorageDataTransferVoucher{proposalNd.Cid()}, proposal.Proposal.PieceCID, nil) + if !xerrors.Is(err, rv.ErrNoDeal) { + t.Fatal("Pull should fail if there is no deal stored") + } + }) + t.Run("ValidatePull fails wrong piece ref", func(t *testing.T) { + clientDeal, err := 
newClientDeal(receiver, storagemarket.StorageDealProposalAccepted) + if err != nil { + t.Fatal("error creating client deal") + } + if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { + t.Fatal("deal tracking failed") + } + _, err = validator.ValidatePull(false, datatransfer.ChannelID{}, receiver, &rv.StorageDataTransferVoucher{clientDeal.ProposalCid}, blockGenerator.Next().Cid(), nil) + if !xerrors.Is(err, rv.ErrWrongPiece) { + t.Fatal("Pull should fail if piece ref is incorrect") + } + }) + t.Run("ValidatePull fails wrong deal state", func(t *testing.T) { + clientDeal, err := newClientDeal(receiver, storagemarket.StorageDealActive) + if err != nil { + t.Fatal("error creating client deal") + } + if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { + t.Fatal("deal tracking failed") + } + payloadCid := clientDeal.DataRef.Root + _, err = validator.ValidatePull(false, datatransfer.ChannelID{}, receiver, &rv.StorageDataTransferVoucher{clientDeal.ProposalCid}, payloadCid, nil) + if !xerrors.Is(err, rv.ErrInacceptableDealState) { + t.Fatal("Pull should fail if deal is in a state that cannot be data transferred") + } + }) + t.Run("ValidatePull succeeds", func(t *testing.T) { + clientDeal, err := newClientDeal(receiver, storagemarket.StorageDealValidating) + if err != nil { + t.Fatal("error creating client deal") + } + if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { + t.Fatal("deal tracking failed") + } + payloadCid := clientDeal.DataRef.Root + _, err = validator.ValidatePull(false, datatransfer.ChannelID{}, receiver, &rv.StorageDataTransferVoucher{clientDeal.ProposalCid}, payloadCid, nil) + if err != nil { + t.Fatal("Pull should should succeed when all parameters are correct") + } + }) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/types.go new file mode 100644 index 
00000000000..eb32b1eb957 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/types.go @@ -0,0 +1,55 @@ +package requestvalidation + +import ( + "errors" + + "github.com/ipfs/go-cid" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +//go:generate cbor-gen-for StorageDataTransferVoucher + +var ( + // ErrWrongVoucherType means the voucher was not the correct type can validate against + ErrWrongVoucherType = errors.New("cannot validate voucher type") + + // ErrNoPushAccepted just means clients do not accept pushes for storage deals + ErrNoPushAccepted = errors.New("client should not receive data for a storage deal") + + // ErrNoPullAccepted just means providers do not accept pulls for storage deals + ErrNoPullAccepted = errors.New("provider should not send data for a storage deal") + + // ErrNoDeal means no active deal was found for this vouchers proposal cid + ErrNoDeal = errors.New("no deal found for this proposal") + + // ErrWrongPeer means that the other peer for this data transfer request does not match + // the other peer for the deal + ErrWrongPeer = errors.New("data Transfer peer id and Deal peer id do not match") + + // ErrWrongPiece means that the pieceref for this data transfer request does not match + // the one specified in the deal + ErrWrongPiece = errors.New("base CID for deal does not match CID for piece") + + // ErrInacceptableDealState means the deal for this transfer is not in a deal state + // where transfer can be performed + ErrInacceptableDealState = errors.New("deal is not in a state where deals are accepted") + + // DataTransferStates are the states in which it would make sense to actually start a data transfer + // We accept deals even in the StorageDealTransferring state too as we could also also receive a data transfer restart request + DataTransferStates = 
[]storagemarket.StorageDealStatus{storagemarket.StorageDealValidating, storagemarket.StorageDealWaitingForData, storagemarket.StorageDealUnknown, + storagemarket.StorageDealTransferring, storagemarket.StorageDealProviderTransferAwaitRestart} +) + +// StorageDataTransferVoucher is the voucher type for data transfers +// used by the storage market +type StorageDataTransferVoucher struct { + Proposal cid.Cid +} + +// Type is the unique string identifier for a StorageDataTransferVoucher +func (dv *StorageDataTransferVoucher) Type() datatransfer.TypeIdentifier { + return "StorageDataTransferVoucher" +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/types_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/types_cbor_gen.go new file mode 100644 index 00000000000..04419da1508 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/types_cbor_gen.go @@ -0,0 +1,80 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package requestvalidation + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufStorageDataTransferVoucher = []byte{129} + +func (t *StorageDataTransferVoucher) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufStorageDataTransferVoucher); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + return nil +} + +func (t *StorageDataTransferVoucher) UnmarshalCBOR(r io.Reader) (err error) { + *t = StorageDataTransferVoucher{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Proposal (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/unified_request_validator.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/unified_request_validator.go new file mode 100644 index 00000000000..ab36622c2b2 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/requestvalidation/unified_request_validator.go @@ -0,0 +1,72 @@ +package requestvalidation + +import ( + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + 
"github.com/libp2p/go-libp2p-core/peer" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// PushDeals gets deal states for Push validations +type PushDeals interface { + Get(cid.Cid) (storagemarket.MinerDeal, error) +} + +// PullDeals gets deal states for Pull validations +type PullDeals interface { + Get(cid.Cid) (storagemarket.ClientDeal, error) +} + +// UnifiedRequestValidator is a data transfer request validator that validates +// StorageDataTransferVoucher from the given state store +// It can be made to only accept push requests (Provider) or pull requests (Client) +// by passing nil for the statestore value for pushes or pulls +type UnifiedRequestValidator struct { + pushDeals PushDeals + pullDeals PullDeals +} + +// NewUnifiedRequestValidator returns a new instance of UnifiedRequestValidator +func NewUnifiedRequestValidator(pushDeals PushDeals, pullDeals PullDeals) *UnifiedRequestValidator { + return &UnifiedRequestValidator{ + pushDeals: pushDeals, + pullDeals: pullDeals, + } +} + +// SetPushDeals sets the store to look up push deals with +func (v *UnifiedRequestValidator) SetPushDeals(pushDeals PushDeals) { + v.pushDeals = pushDeals +} + +// SetPullDeals sets the store to look up pull deals with +func (v *UnifiedRequestValidator) SetPullDeals(pullDeals PullDeals) { + v.pullDeals = pullDeals +} + +// ValidatePush implements the ValidatePush method of a data transfer request validator. 
+// If no pushStore exists, it rejects the request +// Otherwise, it calls the ValidatePush function to validate the deal +func (v *UnifiedRequestValidator) ValidatePush(isRestart bool, _ datatransfer.ChannelID, sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + if v.pushDeals == nil { + return nil, ErrNoPushAccepted + } + + return nil, ValidatePush(v.pushDeals, sender, voucher, baseCid, selector) +} + +// ValidatePull implements the ValidatePull method of a data transfer request validator. +// If no pullStore exists, it rejects the request +// Otherwise, it calls the ValidatePull function to validate the deal +func (v *UnifiedRequestValidator) ValidatePull(isRestart bool, _ datatransfer.ChannelID, receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + if v.pullDeals == nil { + return nil, ErrNoPullAccepted + } + + return nil, ValidatePull(v.pullDeals, receiver, voucher, baseCid, selector) +} + +var _ datatransfer.RequestValidator = &UnifiedRequestValidator{} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/storedask/storedask.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/storedask/storedask.go new file mode 100644 index 00000000000..96e08bf31f3 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/storedask/storedask.go @@ -0,0 +1,208 @@ +package storedask + +import ( + "bytes" + "context" + "sync" + + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + versioning "github.com/filecoin-project/go-ds-versioning/pkg" + versionedds "github.com/filecoin-project/go-ds-versioning/pkg/datastore" + "github.com/filecoin-project/go-ds-versioning/pkg/versioned" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" +) + +var log = logging.Logger("storedask") + +// DefaultPrice is the default price for unverified deals (in attoFil / GiB / Epoch) +var DefaultPrice = abi.NewTokenAmount(500000000) + +// DefaultVerifiedPrice is the default price for verified deals (in attoFil / GiB / Epoch) +var DefaultVerifiedPrice = abi.NewTokenAmount(50000000) + +// DefaultDuration is the default number of epochs a storage ask is in effect for +const DefaultDuration abi.ChainEpoch = 1000000 + +// DefaultMinPieceSize is the minimum accepted piece size for data +const DefaultMinPieceSize abi.PaddedPieceSize = 256 + +// DefaultMaxPieceSize is the default maximum accepted size for pieces for deals +// TODO: It would be nice to default this to the miner's sector size +const DefaultMaxPieceSize abi.PaddedPieceSize = 1 << 20 + +// StoredAsk implements a persisted SignedStorageAsk that lasts through restarts +// It also maintains a cache of the current SignedStorageAsk in memory +type StoredAsk struct { + askLk sync.RWMutex + ask *storagemarket.SignedStorageAsk + ds datastore.Batching + dsKey datastore.Key + spn storagemarket.StorageProviderNode + actor address.Address +} + +// NewStoredAsk returns a new instance of StoredAsk +// It will initialize a new SignedStorageAsk on disk if one is not set +// Otherwise it loads the current SignedStorageAsk from disk +func NewStoredAsk(ds datastore.Batching, dsKey datastore.Key, spn storagemarket.StorageProviderNode, actor address.Address, + opts ...storagemarket.StorageAskOption) (*StoredAsk, error) { + s := &StoredAsk{ + spn: spn, + actor: actor, + dsKey: dsKey, + } + + askMigrations, err := versioned.BuilderList{ + versioned.NewVersionedBuilder(migrations.GetMigrateSignedStorageAsk0To1(s.sign), 
versioning.VersionKey("1")), + }.Build() + + if err != nil { + return nil, err + } + + versionedDs, migrateDs := versionedds.NewVersionedDatastore(ds, askMigrations, versioning.VersionKey("1")) + + // TODO: this is a bit risky -- but this is just a single key so it's probably ok to run migrations in the constructor + err = migrateDs(context.TODO()) + if err != nil { + return nil, err + } + + s.ds = versionedDs + + if err := s.tryLoadAsk(); err != nil { + return nil, err + } + + if s.ask == nil { + // TODO: we should be fine with this state, and just say it means 'not actively accepting deals' + // for now... lets just set a price + if err := s.SetAsk(DefaultPrice, DefaultVerifiedPrice, DefaultDuration, opts...); err != nil { + return nil, xerrors.Errorf("failed setting a default price: %w", err) + } + } + return s, nil +} + +// SetAsk configures the storage miner's ask with the provided prices (for unverified and verified deals), +// duration, and options. Any previously-existing ask is replaced. If no options are passed to configure +// MinPieceSize and MaxPieceSize, the previous ask's values will be used, if available. 
+// It also increments the sequence number on the ask +func (s *StoredAsk) SetAsk(price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, options ...storagemarket.StorageAskOption) error { + s.askLk.Lock() + defer s.askLk.Unlock() + var seqno uint64 + minPieceSize := DefaultMinPieceSize + maxPieceSize := DefaultMaxPieceSize + if s.ask != nil { + seqno = s.ask.Ask.SeqNo + 1 + minPieceSize = s.ask.Ask.MinPieceSize + maxPieceSize = s.ask.Ask.MaxPieceSize + } + + ctx := context.TODO() + + _, height, err := s.spn.GetChainHead(ctx) + if err != nil { + return err + } + ask := &storagemarket.StorageAsk{ + Price: price, + VerifiedPrice: verifiedPrice, + Timestamp: height, + Expiry: height + duration, + Miner: s.actor, + SeqNo: seqno, + MinPieceSize: minPieceSize, + MaxPieceSize: maxPieceSize, + } + + for _, option := range options { + option(ask) + } + + sig, err := s.sign(ctx, ask) + if err != nil { + return err + } + return s.saveAsk(&storagemarket.SignedStorageAsk{ + Ask: ask, + Signature: sig, + }) + +} + +func (s *StoredAsk) sign(ctx context.Context, ask *storagemarket.StorageAsk) (*crypto.Signature, error) { + tok, _, err := s.spn.GetChainHead(ctx) + if err != nil { + return nil, err + } + + return providerutils.SignMinerData(ctx, ask, s.actor, tok, s.spn.GetMinerWorkerAddress, s.spn.SignBytes) +} + +// GetAsk returns the current signed storage ask, or nil if one does not exist. 
+func (s *StoredAsk) GetAsk() *storagemarket.SignedStorageAsk { + s.askLk.RLock() + defer s.askLk.RUnlock() + if s.ask == nil { + return nil + } + ask := *s.ask + return &ask +} + +func (s *StoredAsk) tryLoadAsk() error { + s.askLk.Lock() + defer s.askLk.Unlock() + + err := s.loadAsk() + if err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + log.Warn("no previous ask found, miner will not accept deals until a price is set") + return nil + } + return err + } + + return nil +} + +func (s *StoredAsk) loadAsk() error { + askb, err := s.ds.Get(context.TODO(), s.dsKey) + if err != nil { + return xerrors.Errorf("failed to load most recent ask from disk: %w", err) + } + + var ssa storagemarket.SignedStorageAsk + if err := cborutil.ReadCborRPC(bytes.NewReader(askb), &ssa); err != nil { + return err + } + + s.ask = &ssa + return nil +} + +func (s *StoredAsk) saveAsk(a *storagemarket.SignedStorageAsk) error { + b, err := cborutil.Dump(a) + if err != nil { + return err + } + + if err := s.ds.Put(context.TODO(), s.dsKey, b); err != nil { + return err + } + + s.ask = a + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/storedask/storedask_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/storedask/storedask_test.go new file mode 100644 index 00000000000..1c630a0381e --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/impl/storedask/storedask_test.go @@ -0,0 +1,183 @@ +package storedask_test + +import ( + "bytes" + "context" + "errors" + "math/rand" + "testing" + + "github.com/ipfs/go-datastore" + dss "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" + 
"github.com/filecoin-project/go-fil-markets/storagemarket/migrations" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" +) + +func TestStoredAsk(t *testing.T) { + ds := dss.MutexWrap(datastore.NewMapDatastore()) + spn := &testnodes.FakeProviderNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + SMState: testnodes.NewStorageMarketState(), + }, + } + actor := address.TestAddress2 + storedAsk, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spn, actor) + require.NoError(t, err) + + testPrice := abi.NewTokenAmount(1000000000) + testVerifiedPrice := abi.NewTokenAmount(100000000) + testDuration := abi.ChainEpoch(200) + t.Run("auto initializing", func(t *testing.T) { + ask := storedAsk.GetAsk() + require.NotNil(t, ask) + }) + t.Run("setting ask price", func(t *testing.T) { + minPieceSize := abi.PaddedPieceSize(1024) + err := storedAsk.SetAsk(testPrice, testVerifiedPrice, testDuration, storagemarket.MinPieceSize(minPieceSize)) + require.NoError(t, err) + ask := storedAsk.GetAsk() + require.Equal(t, ask.Ask.Price, testPrice) + require.Equal(t, ask.Ask.Expiry-ask.Ask.Timestamp, testDuration) + require.Equal(t, ask.Ask.MinPieceSize, minPieceSize) + }) + t.Run("reloading stored ask from disk", func(t *testing.T) { + storedAsk2, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spn, actor) + require.NoError(t, err) + ask := storedAsk2.GetAsk() + require.Equal(t, ask.Ask.Price, testPrice) + require.Equal(t, ask.Ask.VerifiedPrice, testVerifiedPrice) + require.Equal(t, ask.Ask.Expiry-ask.Ask.Timestamp, testDuration) + }) + + t.Run("node errors", func(t *testing.T) { + spnStateIDErr := &testnodes.FakeProviderNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + GetChainHeadError: errors.New("something went wrong"), + SMState: testnodes.NewStorageMarketState(), + }, + } + // should load cause ask is is still in data store + storedAskError, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spnStateIDErr, actor) + 
require.NoError(t, err) + err = storedAskError.SetAsk(testPrice, testVerifiedPrice, testDuration) + require.Error(t, err) + + spnMinerWorkerErr := &testnodes.FakeProviderNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + SMState: testnodes.NewStorageMarketState(), + }, + MinerWorkerError: errors.New("something went wrong"), + } + // should load cause ask is is still in data store + storedAskError, err = storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spnMinerWorkerErr, actor) + require.NoError(t, err) + err = storedAskError.SetAsk(testPrice, testVerifiedPrice, testDuration) + require.Error(t, err) + + spnSignBytesErr := &testnodes.FakeProviderNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + SMState: testnodes.NewStorageMarketState(), + SignBytesError: errors.New("something went wrong"), + }, + } + // should load cause ask is is still in data store + storedAskError, err = storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spnSignBytesErr, actor) + require.NoError(t, err) + err = storedAskError.SetAsk(testPrice, testVerifiedPrice, testDuration) + require.Error(t, err) + }) +} + +func TestPieceSizeLimits(t *testing.T) { + // create ask with options + ds := dss.MutexWrap(datastore.NewMapDatastore()) + spn := &testnodes.FakeProviderNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + SMState: testnodes.NewStorageMarketState(), + }, + } + actor := address.TestAddress2 + min := abi.PaddedPieceSize(1024) + max := abi.PaddedPieceSize(4096) + sa, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spn, actor, storagemarket.MinPieceSize(min), storagemarket.MaxPieceSize(max)) + require.NoError(t, err) + ask := sa.GetAsk() + require.EqualValues(t, min, ask.Ask.MinPieceSize) + require.EqualValues(t, max, ask.Ask.MaxPieceSize) + + // SetAsk should not clobber previously-set options + require.NoError(t, sa.SetAsk(ask.Ask.Price, ask.Ask.VerifiedPrice, ask.Ask.Expiry)) + require.NoError(t, err) + ask = sa.GetAsk() + require.EqualValues(t, min, 
ask.Ask.MinPieceSize) + require.EqualValues(t, max, ask.Ask.MaxPieceSize) + + // now change the size limits via set ask + testPrice := abi.NewTokenAmount(1000000000) + testVerifiedPrice := abi.NewTokenAmount(100000000) + testDuration := abi.ChainEpoch(200) + newMin := abi.PaddedPieceSize(150) + newMax := abi.PaddedPieceSize(12345) + require.NoError(t, sa.SetAsk(testPrice, testVerifiedPrice, testDuration, storagemarket.MinPieceSize(newMin), storagemarket.MaxPieceSize(newMax))) + + // call get + ask = sa.GetAsk() + require.EqualValues(t, newMin, ask.Ask.MinPieceSize) + require.EqualValues(t, newMax, ask.Ask.MaxPieceSize) +} + +func TestMigrations(t *testing.T) { + ctx := context.Background() + ds := dss.MutexWrap(datastore.NewMapDatastore()) + spn := &testnodes.FakeProviderNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + SMState: testnodes.NewStorageMarketState(), + }, + } + actor := address.TestAddress2 + oldAsk := &migrations.StorageAsk0{ + Price: abi.NewTokenAmount(rand.Int63()), + VerifiedPrice: abi.NewTokenAmount(rand.Int63()), + MinPieceSize: abi.PaddedPieceSize(rand.Uint64()), + MaxPieceSize: abi.PaddedPieceSize(rand.Uint64()), + Miner: address.TestAddress2, + Timestamp: abi.ChainEpoch(rand.Int63()), + Expiry: abi.ChainEpoch(rand.Int63()), + SeqNo: rand.Uint64(), + } + tok, _, err := spn.GetChainHead(ctx) + require.NoError(t, err) + sig, err := providerutils.SignMinerData(ctx, oldAsk, actor, tok, spn.GetMinerWorkerAddress, spn.SignBytes) + require.NoError(t, err) + oldSignedAsk := &migrations.SignedStorageAsk0{ + Ask: oldAsk, + Signature: sig, + } + buf := new(bytes.Buffer) + err = oldSignedAsk.MarshalCBOR(buf) + require.NoError(t, err) + err = ds.Put(ctx, datastore.NewKey("latest-ask"), buf.Bytes()) + require.NoError(t, err) + storedAsk, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spn, actor) + require.NoError(t, err) + ask := storedAsk.GetAsk() + expectedAsk := &storagemarket.StorageAsk{ + Price: oldAsk.Price, + VerifiedPrice: 
oldAsk.VerifiedPrice, + MinPieceSize: oldAsk.MinPieceSize, + MaxPieceSize: oldAsk.MaxPieceSize, + Miner: oldAsk.Miner, + Timestamp: oldAsk.Timestamp, + Expiry: oldAsk.Expiry, + SeqNo: oldAsk.SeqNo, + } + require.Equal(t, expectedAsk, ask.Ask) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/integration_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/integration_test.go new file mode 100644 index 00000000000..3b27f60cef1 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/integration_test.go @@ -0,0 +1,1084 @@ +package storagemarket_test + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/ipfs/go-datastore" + "github.com/ipld/go-car" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-data-transfer/channelmonitor" + dtimpl "github.com/filecoin-project/go-data-transfer/impl" + dtnet "github.com/filecoin-project/go-data-transfer/network" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness/dependencies" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" +) + +var noOpDelay = testnodes.DelayFakeCommonNode{} + +func TestMakeDeal(t *testing.T) { + fixtureFiles := []string{"payload.txt", "duplicate_blocks.txt"} + + ctx := context.Background() + testCases := map[string]struct { + useStore bool + disableNewDeals bool + }{ + "with stores": { + useStore: true, + }, + "with just blockstore": { + 
useStore: false, + }, + "disable new protocols": { + useStore: true, + disableNewDeals: true, + }, + } + + for _, fileName := range fixtureFiles { + for testCase, data := range testCases { + t.Run(testCase+"-"+filepath.Base(fileName), func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + h := testharness.NewHarness(t, ctx, data.useStore, noOpDelay, noOpDelay, data.disableNewDeals, fileName) + shared_testutil.StartAndWaitForReady(ctx, t, h.Provider) + shared_testutil.StartAndWaitForReady(ctx, t, h.Client) + + // set up a subscriber + providerDealChan := make(chan storagemarket.MinerDeal) + var checkedUnmarshalling bool + subscriber := func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { + if !checkedUnmarshalling { + // test that deal created can marshall and unmarshalled + jsonBytes, err := json.Marshal(deal) + require.NoError(t, err) + var unmDeal storagemarket.MinerDeal + err = json.Unmarshal(jsonBytes, &unmDeal) + require.NoError(t, err) + checkedUnmarshalling = true + } + providerDealChan <- deal + } + _ = h.Provider.SubscribeToEvents(subscriber) + + clientDealChan := make(chan storagemarket.ClientDeal) + clientSubscriber := func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { + clientDealChan <- deal + } + _ = h.Client.SubscribeToEvents(clientSubscriber) + + // set ask price where we'll accept any price + err := h.Provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000) + assert.NoError(t, err) + + result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, true, false) + proposalCid := result.ProposalCid + + var providerSeenDeal storagemarket.MinerDeal + var clientSeenDeal storagemarket.ClientDeal + var providerstates, clientstates []storagemarket.StorageDealStatus + for providerSeenDeal.State != storagemarket.StorageDealExpired || + clientSeenDeal.State != storagemarket.StorageDealExpired { + select { + case <-ctx.Done(): + 
t.Fatalf(`did not see all states before context closed + saw client: %v, + saw provider: %v`, dealStatesToStrings(clientstates), dealStatesToStrings(providerstates)) + case clientSeenDeal = <-clientDealChan: + if len(clientstates) == 0 || clientSeenDeal.State != clientstates[len(clientstates)-1] { + clientstates = append(clientstates, clientSeenDeal.State) + } + case providerSeenDeal = <-providerDealChan: + if len(providerstates) == 0 || providerSeenDeal.State != providerstates[len(providerstates)-1] { + providerstates = append(providerstates, providerSeenDeal.State) + } + } + } + + expProviderStates := []storagemarket.StorageDealStatus{ + storagemarket.StorageDealValidating, + storagemarket.StorageDealAcceptWait, + storagemarket.StorageDealWaitingForData, + storagemarket.StorageDealTransferring, + storagemarket.StorageDealVerifyData, + storagemarket.StorageDealReserveProviderFunds, + storagemarket.StorageDealPublish, + storagemarket.StorageDealPublishing, + storagemarket.StorageDealStaged, + storagemarket.StorageDealAwaitingPreCommit, + storagemarket.StorageDealSealing, + storagemarket.StorageDealFinalizing, + storagemarket.StorageDealActive, + storagemarket.StorageDealExpired, + } + + expClientStates := []storagemarket.StorageDealStatus{ + storagemarket.StorageDealReserveClientFunds, + //storagemarket.StorageDealClientFunding, // skipped because funds available + storagemarket.StorageDealFundsReserved, + storagemarket.StorageDealStartDataTransfer, + storagemarket.StorageDealTransferQueued, + storagemarket.StorageDealTransferring, + storagemarket.StorageDealCheckForAcceptance, + storagemarket.StorageDealProposalAccepted, + storagemarket.StorageDealAwaitingPreCommit, + storagemarket.StorageDealSealing, + storagemarket.StorageDealActive, + storagemarket.StorageDealExpired, + } + + assert.Equal(t, dealStatesToStrings(expProviderStates), dealStatesToStrings(providerstates)) + assert.Equal(t, dealStatesToStrings(expClientStates), dealStatesToStrings(clientstates)) + + 
// check a couple of things to make sure we're getting the whole deal + assert.Equal(t, h.TestData.Host1.ID(), providerSeenDeal.Client) + assert.Empty(t, providerSeenDeal.Message) + assert.Equal(t, proposalCid, providerSeenDeal.ProposalCid) + assert.Equal(t, h.ProviderAddr, providerSeenDeal.ClientDealProposal.Proposal.Provider) + + cd, err := h.Client.GetLocalDeal(ctx, proposalCid) + assert.NoError(t, err) + shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State) + assert.True(t, cd.FastRetrieval) + + providerDeals, err := h.Provider.ListLocalDeals() + assert.NoError(t, err) + + pd := providerDeals[0] + assert.Equal(t, proposalCid, pd.ProposalCid) + assert.True(t, pd.FastRetrieval) + shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State) + + dl, err := h.Provider.GetLocalDeal(pd.ProposalCid) + require.NoError(t, err) + assert.True(t, dl.FastRetrieval) + + // test out query protocol + status, err := h.Client.GetProviderDealState(ctx, proposalCid) + assert.NoError(t, err) + shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, status.State) + assert.True(t, status.FastRetrieval) + + // ensure that the handoff has fast retrieval info + assert.Len(t, h.ProviderNode.OnDealCompleteCalls, 1) + assert.True(t, h.ProviderNode.OnDealCompleteCalls[0].FastRetrieval) + h.ClientNode.VerifyExpectations(t) + + // ensure reference provider was called + notifs := h.ReferenceProvider.GetNotifs() + require.Len(t, notifs, 1) + _, ok := notifs[string(proposalCid.Bytes())] + require.True(t, ok) + }) + } + } +} + +func TestMakeDealOffline(t *testing.T) { + fixtureFiles := []string{ + filepath.Join(shared_testutil.ThisDir(t), "./fixtures/payload.txt"), + filepath.Join(shared_testutil.ThisDir(t), "./fixtures/duplicate_blocks.txt"), + } + + for _, file := range fixtureFiles { + t.Run(filepath.Base(file), func(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + h 
:= testharness.NewHarness(t, ctx, true, noOpDelay, noOpDelay, false) + + shared_testutil.StartAndWaitForReady(ctx, t, h.Provider) + shared_testutil.StartAndWaitForReady(ctx, t, h.Client) + + commP, size, err := clientutils.CommP(ctx, h.Data, &storagemarket.DataRef{ + // hacky but need it for now because if it's manual, we wont get a CommP. + TransferType: storagemarket.TTGraphsync, + Root: h.PayloadCid, + }, 2<<29) + require.NoError(t, err) + + dataRef := &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + Root: h.PayloadCid, + PieceCid: &commP, + PieceSize: size, + } + + result := h.ProposeStorageDeal(t, dataRef, false, false) + proposalCid := result.ProposalCid + + wg := sync.WaitGroup{} + + h.WaitForClientEvent(&wg, storagemarket.ClientEventDataTransferComplete) + h.WaitForProviderEvent(&wg, storagemarket.ProviderEventDataRequested) + waitGroupWait(ctx, &wg) + + cd, err := h.Client.GetLocalDeal(ctx, proposalCid) + assert.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ = h.Client.GetLocalDeal(ctx, proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 1*time.Second, 100*time.Millisecond, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + providerDeals, err := h.Provider.ListLocalDeals() + assert.NoError(t, err) + + pd := providerDeals[0] + assert.True(t, pd.ProposalCid.Equals(proposalCid)) + shared_testutil.AssertDealState(t, storagemarket.StorageDealWaitingForData, pd.State) + + // Do a selective CARv1 traversal on the CARv2 file to get a + // deterministic CARv1 that we can import on the miner side. 
+ sc := car.NewSelectiveCar(ctx, h.Data, []car.Dag{{Root: h.PayloadCid, Selector: selectorparse.CommonSelector_ExploreAllRecursively}}) + prepared, err := sc.Prepare() + require.NoError(t, err) + carBuf := new(bytes.Buffer) + require.NoError(t, prepared.Write(carBuf)) + + err = h.Provider.ImportDataForDeal(ctx, pd.ProposalCid, carBuf) + require.NoError(t, err) + + h.WaitForClientEvent(&wg, storagemarket.ClientEventDealExpired) + h.WaitForProviderEvent(&wg, storagemarket.ProviderEventDealExpired) + waitGroupWait(ctx, &wg) + + require.Eventually(t, func() bool { + cd, err = h.Client.GetLocalDeal(ctx, proposalCid) + if err != nil { + return false + } + if cd.State != storagemarket.StorageDealExpired { + return false + } + + providerDeals, err = h.Provider.ListLocalDeals() + if err != nil { + return false + } + + pd = providerDeals[0] + if !pd.ProposalCid.Equals(proposalCid) { + return false + } + + if pd.State != storagemarket.StorageDealExpired { + return false + } + return true + }, 5*time.Second, 500*time.Millisecond) + + cd, err = h.Client.GetLocalDeal(ctx, proposalCid) + assert.NoError(t, err) + shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State) + + providerDeals, err = h.Provider.ListLocalDeals() + assert.NoError(t, err) + + pd = providerDeals[0] + assert.True(t, pd.ProposalCid.Equals(proposalCid)) + shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State) + }) + } +} + +func TestMakeDealNonBlocking(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + h := testharness.NewHarness(t, ctx, true, noOpDelay, noOpDelay, false) + + testCids := shared_testutil.GenerateCids(2) + + h.ProviderNode.WaitForMessageBlocks = true + h.ProviderNode.AddFundsCid = testCids[1] + shared_testutil.StartAndWaitForReady(ctx, t, h.Provider) + + h.ClientNode.AddFundsCid = testCids[0] + shared_testutil.StartAndWaitForReady(ctx, t, h.Client) + + result := 
h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false) + + wg := sync.WaitGroup{} + h.WaitForClientEvent(&wg, storagemarket.ClientEventDataTransferComplete) + h.WaitForProviderEvent(&wg, storagemarket.ProviderEventFundingInitiated) + waitGroupWait(ctx, &wg) + + cd, err := h.Client.GetLocalDeal(ctx, result.ProposalCid) + assert.NoError(t, err) + shared_testutil.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, cd.State) + + providerDeals, err := h.Provider.ListLocalDeals() + assert.NoError(t, err) + + // Provider should be blocking on waiting for funds to appear on chain + pd := providerDeals[0] + assert.Equal(t, result.ProposalCid, pd.ProposalCid) + require.Eventually(t, func() bool { + providerDeals, err := h.Provider.ListLocalDeals() + assert.NoError(t, err) + pd = providerDeals[0] + return pd.State == storagemarket.StorageDealProviderFunding + }, 1*time.Second, 100*time.Millisecond, "actual deal status is %s", storagemarket.DealStates[pd.State]) +} + +// TestRestartOnlyProviderDataTransfer tests that when the provider is shut +// down, the connection is broken and then the provider is restarted, the +// data transfer will resume and the deal will complete successfully. 
func TestRestartOnlyProviderDataTransfer(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	// Configure data-transfer to retry connection
	dtClientNetRetry := dtnet.RetryParameters(time.Second, time.Second, 5, 1)
	td := shared_testutil.NewLibp2pTestData(ctx, t)
	td.DTNet1 = dtnet.NewFromLibp2pHost(td.Host1, dtClientNetRetry)

	// Configure data-transfer to restart after stalling
	restartConf := dtimpl.ChannelRestartConfig(channelmonitor.Config{
		AcceptTimeout:          100 * time.Millisecond,
		RestartBackoff:         100 * time.Millisecond,
		RestartDebounce:        100 * time.Millisecond,
		MaxConsecutiveRestarts: 5,
		CompleteTimeout:        100 * time.Millisecond,
	})
	smState := testnodes.NewStorageMarketState()
	depGen := dependencies.NewDepGenerator()
	depGen.ClientNewDataTransfer = func(ds datastore.Batching, dir string, transferNetwork dtnet.DataTransferNetwork, transport datatransfer.Transport) (datatransfer.Manager, error) {
		return dtimpl.NewDataTransfer(ds, transferNetwork, transport, restartConf)
	}
	deps := depGen.New(t, ctx, td, smState, "", noOpDelay, noOpDelay)
	h := testharness.NewHarnessWithTestData(t, td, deps, true, false)

	client := h.Client
	host1 := h.TestData.Host1
	host2 := h.TestData.Host2

	// start client and provider
	shared_testutil.StartAndWaitForReady(ctx, t, h.Provider)
	shared_testutil.StartAndWaitForReady(ctx, t, h.Client)

	// set ask price where we'll accept any price
	err := h.Provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000)
	require.NoError(t, err)

	// Debug hooks intentionally left commented out:
	//h.DTClient.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) {
	//	fmt.Printf("dt-clnt %s: %s %s\n", datatransfer.Events[event.Code], datatransfer.Statuses[channelState.Status()], channelState.Message())
	//})
	//h.DTProvider.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) {
	//	fmt.Printf("dt-prov %s: %s %s\n", datatransfer.Events[event.Code], datatransfer.Statuses[channelState.Status()], channelState.Message())
	//})
	//
	//_ = client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
	//	fmt.Printf("%s: %s %s\n", storagemarket.ClientEvents[event], storagemarket.DealStates[deal.State], deal.Message)
	//})
	//_ = h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
	//	fmt.Printf("Provider %s: %s\n", storagemarket.ProviderEvents[event], storagemarket.DealStates[deal.State])
	//})

	// wait for provider to enter deal transferring state and stop
	wg := sync.WaitGroup{}
	wg.Add(1)
	var providerState []storagemarket.MinerDeal
	// NOTE(review): this callback assigns to the outer `err` and `providerState`;
	// it is assumed the subscriber fires before the test goroutine reads them
	// (the wg.Done/waitGroupWait pair enforces the ordering) — confirm there is
	// no concurrent use of `err` elsewhere while the callback runs.
	h.DTClient.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) {
		if event.Code == datatransfer.Accept {
			t.Log("client has accepted data-transfer query, shutting down provider")

			require.NoError(t, h.TestData.MockNet.UnlinkPeers(host1.ID(), host2.ID()))
			require.NoError(t, h.TestData.MockNet.DisconnectPeers(host1.ID(), host2.ID()))
			require.NoError(t, h.Provider.Stop())

			// deal could have expired already on the provider side for the `ClientEventDealAccepted` event
			// so, we should wait on the `ProviderEventDealExpired` event ONLY if the deal has not expired.
			providerState, err = h.Provider.ListLocalDeals()
			assert.NoError(t, err)
			wg.Done()
		}
	})

	result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false)
	proposalCid := result.ProposalCid
	t.Log("storage deal proposed")

	waitGroupWait(ctx, &wg)
	t.Log("provider has been shutdown the first time")

	// Assert client state
	cd, err := client.GetLocalDeal(ctx, proposalCid)
	require.NoError(t, err)
	t.Logf("client state after stopping is %s", storagemarket.DealStates[cd.State])
	require.True(t, cd.State == storagemarket.StorageDealStartDataTransfer || cd.State == storagemarket.StorageDealTransferring)

	// Create new provider (but don't restart yet)
	newProvider := h.CreateNewProvider(t, ctx, h.TestData)

	t.Logf("provider state after stopping is %s", storagemarket.DealStates[providerState[0].State])
	require.Equal(t, storagemarket.StorageDealTransferring, providerState[0].State)

	// This wait group will complete after the deal has completed on both the
	// client and provider
	expireWg := sync.WaitGroup{}
	expireWg.Add(1)
	_ = newProvider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
		//fmt.Printf("New Provider %s: %s\n", storagemarket.ProviderEvents[event], storagemarket.DealStates[deal.State])
		if event == storagemarket.ProviderEventDealExpired {
			expireWg.Done()
		}
	})

	expireWg.Add(1)
	_ = client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
		if event == storagemarket.ClientEventDealExpired {
			expireWg.Done()
		}
	})

	// sleep for a moment (presumably to let the provider shutdown settle
	// before the network is restored — TODO confirm the exact intent)
	time.Sleep(1 * time.Second)
	t.Log("finished sleeping")

	// Restore connection, go-data-transfer should try to reconnect
	require.NoError(t, h.TestData.MockNet.LinkAll())
	time.Sleep(200 * time.Millisecond)
	conn, err := h.TestData.MockNet.ConnectPeers(host1.ID(), host2.ID())
	require.NoError(t, err)
	require.NotNil(t, conn)

	// Restart the provider
	shared_testutil.StartAndWaitForReady(ctx, t, newProvider)
	t.Log("------- provider has been restarted---------")

	// -------------------------------------------------------------------
	// How to restart manually - shouldn't be needed as the data-transfer
	// module will restart automatically, but leaving it here in case it's
	// needed for debugging in future.
	//chs, err := h.DTClient.InProgressChannels(ctx)
	//require.Len(t, chs, 1)
	//for chid := range chs {
	//	h.DTClient.RestartDataTransferChannel(ctx, chid)
	//}
	// -------------------------------------------------------------------

	// Wait till both client and provider have completed the deal
	waitGroupWait(ctx, &expireWg)
	t.Log("---------- finished waiting for expected events-------")

	// Ensure the client and provider both reached the final state
	cd, err = client.GetLocalDeal(ctx, proposalCid)
	require.NoError(t, err)
	shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State)

	providerDeals, err := newProvider.ListLocalDeals()
	require.NoError(t, err)

	pd := providerDeals[0]
	require.Equal(t, pd.ProposalCid, proposalCid)
	shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State)
}

// TestRestartProviderAtPublishStage tests that if the provider is restarted
// when it's in the publish state, it will successfully complete the deal
func TestRestartProviderAtPublishStage(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	td := shared_testutil.NewLibp2pTestData(ctx, t)
	smState := testnodes.NewStorageMarketState()
	depGen := dependencies.NewDepGenerator()
	deps := depGen.New(t, ctx, td, smState, "", noOpDelay, noOpDelay)
	h := testharness.NewHarnessWithTestData(t, td, deps, true, false)

	// start client and provider
	shared_testutil.StartAndWaitForReady(ctx, t, h.Provider)
	shared_testutil.StartAndWaitForReady(ctx, t, h.Client)

	// set ask price where we'll accept any price
	err := h.Provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000)
	require.NoError(t, err)

	// Listen for when the provider reaches the Publish state, and shut it down
	wgProviderPublish := sync.WaitGroup{}
	wgProviderPublish.Add(1)
	var providerState []storagemarket.MinerDeal
	h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
		t.Logf("Provider %s: %s\n", storagemarket.ProviderEvents[event], storagemarket.DealStates[deal.State])
		if deal.State == storagemarket.StorageDealPublish {
			require.NoError(t, h.Provider.Stop())

			// brief pause after Stop before reading the provider's local deal
			// state — TODO confirm why a full second is needed here
			time.Sleep(time.Second)

			providerState, err = h.Provider.ListLocalDeals()
			assert.NoError(t, err)
			wgProviderPublish.Done()
		}
	})

	// Propose a storage deal
	result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false)
	proposalCid := result.ProposalCid
	t.Log("storage deal proposed")

	// Wait till the deal reaches the Publish state
	waitGroupWait(ctx, &wgProviderPublish)
	t.Log("provider has been shutdown")

	// Create new provider (but don't restart yet)
	newProvider := h.CreateNewProvider(t, ctx, h.TestData)

	t.Logf("provider state after stopping is %s", storagemarket.DealStates[providerState[0].State])

	// This wait group will complete after the deal has completed on both the
	// client and provider
	expireWg := sync.WaitGroup{}
	expireWg.Add(1)
	_ = newProvider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
		t.Logf("New Provider %s: %s\n", storagemarket.ProviderEvents[event], storagemarket.DealStates[deal.State])
		if event == storagemarket.ProviderEventDealExpired {
			expireWg.Done()
		}
	})

	expireWg.Add(1)
	_ = h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
		if event == storagemarket.ClientEventDealExpired {
			expireWg.Done()
		}
	})

	// sleep for a moment
	time.Sleep(1 * time.Second)
	t.Log("finished sleeping")

	// Restart the provider
	err = newProvider.Start(ctx)
	require.NoError(t, err)
	t.Log("------- provider has been restarted---------")

	// Wait till both client and provider have completed the deal
	waitGroupWait(ctx, &expireWg)
	t.Log("---------- finished waiting for expected events-------")

	// Ensure the provider reached the final state
	providerDeals, err := newProvider.ListLocalDeals()
	require.NoError(t, err)

	pd := providerDeals[0]
	require.Equal(t, pd.ProposalCid, proposalCid)
	shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State)
}

// TestRestartClient stops the client (and provider) once a given client event
// has fired, restarts both on fresh harness dependencies over the same test
// data, and asserts the deal still completes to StorageDealExpired.
// FIXME Gets hung sometimes
func TestRestartClient(t *testing.T) {
	testCases := map[string]struct {
		stopAtClientEvent   storagemarket.ClientEvent
		stopAtProviderEvent storagemarket.ProviderEvent

		expectedClientState storagemarket.StorageDealStatus
		clientDelay         testnodes.DelayFakeCommonNode
		providerDelay       testnodes.DelayFakeCommonNode
	}{

		"ClientEventDataTransferInitiated": {
			// This test can fail if client crashes without seeing a Provider DT complete
			// See https://github.com/filecoin-project/lotus/issues/3966
			stopAtClientEvent:   storagemarket.ClientEventDataTransferInitiated,
			expectedClientState: storagemarket.StorageDealTransferring,
			clientDelay:         noOpDelay,
			providerDelay:       noOpDelay,
		},

		"ClientEventDataTransferComplete": {
			stopAtClientEvent:   storagemarket.ClientEventDataTransferComplete,
			stopAtProviderEvent: storagemarket.ProviderEventDataTransferCompleted,
			expectedClientState: storagemarket.StorageDealCheckForAcceptance,
		},

		"ClientEventFundingComplete": {
			// Edge case: Provider begins the state machine on receiving a deal stream request
			// client crashes -> restarts -> sends deal stream again -> state machine fails
			// See https://github.com/filecoin-project/lotus/issues/3966
			stopAtClientEvent:   storagemarket.ClientEventFundingComplete,
			expectedClientState: storagemarket.StorageDealFundsReserved,
			clientDelay:         noOpDelay,
			providerDelay:       noOpDelay,
		},

		// FIXME
		"ClientEventInitiateDataTransfer": { // works well but sometimes state progresses beyond StorageDealStartDataTransfer
			stopAtClientEvent:   storagemarket.ClientEventInitiateDataTransfer,
			expectedClientState: storagemarket.StorageDealStartDataTransfer,
			clientDelay:         noOpDelay,
			providerDelay:       noOpDelay,
		},

		"ClientEventDealAccepted": { // works well
			stopAtClientEvent:   storagemarket.ClientEventDealAccepted,
			expectedClientState: storagemarket.StorageDealProposalAccepted,
			clientDelay:         testnodes.DelayFakeCommonNode{ValidatePublishedDeal: true},
			providerDelay:       testnodes.DelayFakeCommonNode{OnDealExpiredOrSlashed: true},
		},

		"ClientEventDealActivated": { // works well
			stopAtClientEvent:   storagemarket.ClientEventDealActivated,
			expectedClientState: storagemarket.StorageDealActive,
			clientDelay:         testnodes.DelayFakeCommonNode{OnDealExpiredOrSlashed: true},
			providerDelay:       testnodes.DelayFakeCommonNode{OnDealExpiredOrSlashed: true},
		},

		"ClientEventDealPublished": { // works well
			stopAtClientEvent:   storagemarket.ClientEventDealPublished,
			expectedClientState: storagemarket.StorageDealSealing,
			clientDelay:         testnodes.DelayFakeCommonNode{OnDealSectorCommitted: true},
			providerDelay:       testnodes.DelayFakeCommonNode{OnDealExpiredOrSlashed: true},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			ctx := context.Background()
			ctx, cancel := context.WithTimeout(ctx, 50*time.Second)
			defer cancel()
			h := testharness.NewHarness(t, ctx, true, tc.clientDelay, tc.providerDelay, false)
			host1 := h.TestData.Host1
			host2 := h.TestData.Host2

			shared_testutil.StartAndWaitForReady(ctx, t, h.Client)
			shared_testutil.StartAndWaitForReady(ctx, t, h.Provider)

			// set ask price where we'll accept any price
			err := h.Provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000)
			require.NoError(t, err)

			wg := sync.WaitGroup{}
			wg.Add(1)
			var providerState []storagemarket.MinerDeal
			_ = h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
				if event == tc.stopAtClientEvent {
					// Stop the client and provider at some point during deal negotiation
					ev := storagemarket.ClientEvents[event]
					t.Logf("event %s has happened on client, shutting down client and provider", ev)
					require.NoError(t, h.Client.Stop())
					require.NoError(t, h.TestData.MockNet.UnlinkPeers(host1.ID(), host2.ID()))
					require.NoError(t, h.TestData.MockNet.DisconnectPeers(host1.ID(), host2.ID()))

					// if a provider stop event isn't specified, just stop the provider here
					if tc.stopAtProviderEvent == 0 {
						require.NoError(t, h.Provider.Stop())
					}

					// deal could have expired already on the provider side for the `ClientEventDealAccepted` event
					// so, we should wait on the `ProviderEventDealExpired` event ONLY if the deal has not expired.
					providerState, err = h.Provider.ListLocalDeals()
					assert.NoError(t, err)
					wg.Done()
				}
			})

			// if this test case specifies a provider stop event, stop the
			// provider only once that event fires (adds one to the wait group)
			if tc.stopAtProviderEvent != 0 {
				wg.Add(1)

				_ = h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
					if event == tc.stopAtProviderEvent {
						require.NoError(t, h.Provider.Stop())
						wg.Done()
					}
				})
			}

			result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false)
			proposalCid := result.ProposalCid
			t.Log("storage deal proposed")

			waitGroupWait(ctx, &wg)
			t.Log("both client and provider have been shutdown the first time")

			cd, err := h.Client.GetLocalDeal(ctx, proposalCid)
			require.NoError(t, err)
			t.Logf("client state after stopping is %s", storagemarket.DealStates[cd.State])
			if tc.expectedClientState != cd.State {
				t.Logf("client state message: %s", cd.Message)
				require.Fail(t, fmt.Sprintf("client deal state mismatch:\nexpected: %s\nactual: %s",
					storagemarket.DealStates[tc.expectedClientState], storagemarket.DealStates[cd.State]))
			}

			// Rebuild client and provider over the same test data / datastores
			deps := dependencies.NewDependenciesWithTestData(t, ctx, h.TestData, h.SMState, "", noOpDelay, noOpDelay)
			h = testharness.NewHarnessWithTestData(t, h.TestData, deps, true, false)

			if len(providerState) == 0 {
				t.Log("no deal created on provider after stopping")
			} else {
				t.Logf("provider state after stopping is %s", storagemarket.DealStates[providerState[0].State])
			}

			// Only wait for provider expiry if the deal had not already expired
			// on the provider before shutdown
			if len(providerState) == 0 || providerState[0].State != storagemarket.StorageDealExpired {
				wg.Add(1)
				_ = h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
					if deal.State == storagemarket.StorageDealError {
						t.Errorf("storage deal provider error: %s", deal.Message)
						wg.Done()
					}
					if event == storagemarket.ProviderEventDealExpired {
						wg.Done()
					}
				})
			}
			wg.Add(1)
			_ = h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
				if deal.State == storagemarket.StorageDealError {
					t.Errorf("storage deal client error: %s", deal.Message)
					wg.Done()
				}
				if event == storagemarket.ClientEventDealExpired {
					wg.Done()
				}
			})

			require.NoError(t, h.TestData.MockNet.LinkAll())
			time.Sleep(200 * time.Millisecond)
			conn, err := h.TestData.MockNet.ConnectPeers(host1.ID(), host2.ID())
			require.NoError(t, err)
			require.NotNil(t, conn)
			shared_testutil.StartAndWaitForReady(ctx, t, h.Provider)
			shared_testutil.StartAndWaitForReady(ctx, t, h.Client)
			t.Log("------- client and provider have been restarted---------")
			waitGroupWait(ctx, &wg)
			t.Log("---------- finished waiting for expected events-------")

			cd, err = h.Client.GetLocalDeal(ctx, proposalCid)
			require.NoError(t, err)
			shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State)

			providerDeals, err := h.Provider.ListLocalDeals()
			require.NoError(t, err)

			pd := providerDeals[0]
			require.Equal(t, pd.ProposalCid, proposalCid)
			shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State)
		})
	}
}

// TestBounceConnectionDataTransfer tests that when the connection is
// broken and then restarted, the data transfer will resume and the deal will
// complete successfully.
func TestBounceConnectionDataTransfer(t *testing.T) {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	// Configure data-transfer to make 5 attempts, backing off 1s each time
	dtClientNetRetry := dtnet.RetryParameters(time.Second, time.Second, 5, 1)
	td := shared_testutil.NewLibp2pTestData(ctx, t)
	td.DTNet1 = dtnet.NewFromLibp2pHost(td.Host1, dtClientNetRetry)

	// Configure data-transfer to automatically restart when connection goes down
	restartConf := dtimpl.ChannelRestartConfig(channelmonitor.Config{
		AcceptTimeout:          100 * time.Millisecond,
		RestartBackoff:         100 * time.Millisecond,
		RestartDebounce:        100 * time.Millisecond,
		MaxConsecutiveRestarts: 5,
		CompleteTimeout:        100 * time.Millisecond,
	})
	smState := testnodes.NewStorageMarketState()
	depGen := dependencies.NewDepGenerator()
	depGen.ClientNewDataTransfer = func(ds datastore.Batching, dir string, transferNetwork dtnet.DataTransferNetwork, transport datatransfer.Transport) (datatransfer.Manager, error) {
		return dtimpl.NewDataTransfer(ds, transferNetwork, transport, restartConf)
	}
	deps := depGen.New(t, ctx, td, smState, "", noOpDelay, noOpDelay)
	h := testharness.NewHarnessWithTestData(t, td, deps, true, false)

	client := h.Client
	clientHost := h.TestData.Host1.ID()
	providerHost := h.TestData.Host2.ID()

	// start client and provider
	shared_testutil.StartAndWaitForReady(ctx, t, h.Provider)
	shared_testutil.StartAndWaitForReady(ctx, t, h.Client)

	// set ask price where we'll accept any price
	err := h.Provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000)
	require.NoError(t, err)

	// Bounce connection after this many bytes have been queued for sending
	// (value flips to true once that bounce has been triggered)
	bounceConnectionAt := map[uint64]bool{
		1000: false,
		5000: false,
	}
	h.DTClient.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) {
		//t.Logf("dt-clnt %s: %s %s\n", datatransfer.Events[event.Code], datatransfer.Statuses[channelState.Status()], channelState.Message())
		if event.Code == datatransfer.DataQueuedProgress {
			//t.Logf("  > qued %d", channelState.Queued())

			// Check if enough bytes have been queued that the connection
			// should be bounced
			for at, already := range bounceConnectionAt {
				if channelState.Sent() > at && !already {
					bounceConnectionAt[at] = true

					// Break the connection
					queued := channelState.Queued()
					t.Logf("    breaking connection after sending %d bytes", queued)
					h.TestData.MockNet.DisconnectPeers(clientHost, providerHost)
					h.TestData.MockNet.UnlinkPeers(clientHost, providerHost)

					go func() {
						// Restore the connection
						time.Sleep(100 * time.Millisecond)
						t.Logf("    restoring connection from bounce at %d bytes", queued)
						h.TestData.MockNet.LinkPeers(clientHost, providerHost)
					}()
				}
			}
		}
		//if event.Code == datatransfer.DataSentProgress {
		//	t.Logf("  > sent %d", channelState.Sent())
		//}
	})
	//h.DTProvider.SubscribeToEvents(func(event datatransfer.Event, channelState datatransfer.ChannelState) {
	//	if event.Code == datatransfer.DataReceivedProgress {
	//		t.Logf("  > rcvd %d", channelState.Received())
	//	}
	//})

	result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false)
	proposalCid := result.ProposalCid
	t.Log("storage deal proposed")

	// This wait group will complete after the deal has completed on both the
	// client and provider
	expireWg := sync.WaitGroup{}
	expireWg.Add(1)
	_ = h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
		if event == storagemarket.ProviderEventDealExpired {
			expireWg.Done()
		}
	})

	expireWg.Add(1)
	_ = client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
		if event == storagemarket.ClientEventDealExpired {
			expireWg.Done()
		}
	})

	// Wait till both client and provider have completed the deal
	waitGroupWait(ctx, &expireWg)
	t.Log("---------- finished waiting for expected events-------")

	// Ensure the client and provider both reached the final state
	cd, err := client.GetLocalDeal(ctx, proposalCid)
	require.NoError(t, err)
	shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State)

	providerDeals, err := h.Provider.ListLocalDeals()
	require.NoError(t, err)

	pd := providerDeals[0]
	require.Equal(t, pd.ProposalCid, proposalCid)
	shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State)
}

// TestCancelDataTransfer tests that cancelling a data transfer cancels the deal
func TestCancelDataTransfer(t *testing.T) {
	// run exercises one combination of (who cancels, network up/down).
	run := func(t *testing.T, cancelByClient bool, hasConnectivity bool) {
		ctx := context.Background()
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		h := testharness.NewHarness(t, ctx, true, noOpDelay, noOpDelay, false)
		client := h.Client
		provider := h.Provider
		host1 := h.TestData.Host1
		host2 := h.TestData.Host2

		// start client and provider
		shared_testutil.StartAndWaitForReady(ctx, t, h.Provider)
		shared_testutil.StartAndWaitForReady(ctx, t, h.Client)

		// set ask price where we'll accept any price
		err := h.Provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000)
		require.NoError(t, err)

		// wait for client to start transferring data
		wg := sync.WaitGroup{}
		wg.Add(1)
		_ = h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
			if event == storagemarket.ClientEventDataTransferInitiated {
				ev := storagemarket.ClientEvents[event]
				t.Logf("event %s has happened on client", ev)

				if !hasConnectivity {
					t.Logf("disconnecting client and provider")
					// Simulate the connection to the remote peer going down
					require.NoError(t, h.TestData.MockNet.UnlinkPeers(host1.ID(), host2.ID()))
					require.NoError(t, h.TestData.MockNet.DisconnectPeers(host1.ID(), host2.ID()))
				}

				wg.Done()
			}
		})

		result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false)
		proposalCid := result.ProposalCid
		t.Log("storage deal proposed")

		waitGroupWait(ctx, &wg)
		if !hasConnectivity {
			t.Log("network has been disconnected")
		}

		// Assert client is transferring data
		cd, err := client.GetLocalDeal(ctx, proposalCid)
		require.NoError(t, err)
		t.Logf("client state after stopping is %s", storagemarket.DealStates[cd.State])
		require.True(t, cd.State == storagemarket.StorageDealStartDataTransfer || cd.State == storagemarket.StorageDealTransferring)

		// Keep track of client states
		var clientErroredOut sync.WaitGroup
		var clientstates []storagemarket.StorageDealStatus

		// Client will only move to error state if
		// - client initiates cancel
		// - client receives cancel message from provider
		if cancelByClient || hasConnectivity {
			clientErroredOut.Add(1)
			_ = h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
				if len(clientstates) == 0 || deal.State != clientstates[len(clientstates)-1] {
					clientstates = append(clientstates, deal.State)
				}

				if deal.State == storagemarket.StorageDealError {
					clientErroredOut.Done()
				}
			})
		}

		// Keep track of provider states
		var providerErroredOut sync.WaitGroup
		var providerstates []storagemarket.StorageDealStatus

		// Provider will only move to error state if
		// - provider initiates cancel
		// - provider receives cancel message from client
		if !cancelByClient || hasConnectivity {
			providerErroredOut.Add(1)
			_ = h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
				if len(providerstates) == 0 || deal.State != providerstates[len(providerstates)-1] {
					providerstates = append(providerstates, deal.State)
				}

				if deal.State == storagemarket.StorageDealError {
					providerErroredOut.Done()
				}
			})
		}

		// Should be one in-progress channel
		chans, err := h.DTClient.InProgressChannels(ctx)
		require.NoError(t, err)
		require.Len(t, chans, 1)
		for _, ch := range chans {
			require.Equal(t, datatransfer.Ongoing, ch.Status())

			dt := h.DTClient
			if !cancelByClient {
				dt = h.DTProvider
			}

			// Simulate data transfer channel being cancelled
			chid := ch.ChannelID()
			if hasConnectivity {
				err := dt.CloseDataTransferChannel(ctx, chid)
				require.NoError(t, err)
			} else {
				// If the network is down, use a short timeout so that the test
				// doesn't take too long to complete
				ctx, closeCtxCancel := context.WithTimeout(ctx, 100*time.Millisecond)
				defer closeCtxCancel()
				_ = dt.CloseDataTransferChannel(ctx, chid)
			}
		}

		// Wait for the state machines to reach the error state
		waitGroupWait(ctx, &clientErroredOut)
		waitGroupWait(ctx, &providerErroredOut)

		// Make sure state machine passed through expected states
		possStates := []storagemarket.StorageDealStatus{
			storagemarket.StorageDealTransferring,
			storagemarket.StorageDealFailing,
			storagemarket.StorageDealError,
		}
		expClientStates := possStates[len(possStates)-len(clientstates):]
		assert.Equal(t, dealStatesToStrings(expClientStates), dealStatesToStrings(clientstates))
		expProviderStates := possStates[len(possStates)-len(providerstates):]
		assert.Equal(t, dealStatesToStrings(expProviderStates), dealStatesToStrings(providerstates))

		// Verify the error message for the deal is correct
		if cancelByClient || hasConnectivity {
			deals, err := client.ListLocalDeals(ctx)
			require.NoError(t, err)
			assert.Len(t, deals, 1)
			assert.Equal(t, "data transfer cancelled", deals[0].Message)
		}
		if !cancelByClient || hasConnectivity {
			pdeals, err := provider.ListLocalDeals()
			require.NoError(t, err)
			assert.Len(t, pdeals, 1)
			assert.Equal(t, "data transfer cancelled", pdeals[0].Message)
		}
	}

	t.Run("client cancel request good connectivity", func(t *testing.T) {
		run(t, true, true)
	})
	t.Run("client cancel request no connectivity", func(t *testing.T) {
		run(t, true, false)
	})
	t.Run("provider cancel request good connectivity", func(t *testing.T) {
		run(t, false, true)
	})
	t.Run("provider cancel request no connectivity", func(t *testing.T) {
		run(t, false, false)
	})
}

// waitGroupWait calls wg.Wait while respecting context cancellation
func waitGroupWait(ctx context.Context, wg *sync.WaitGroup) {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-ctx.Done():
	case <-done:
	}
}

// dealStatesToStrings maps deal status codes to their human-readable names.
func dealStatesToStrings(states []storagemarket.StorageDealStatus) []string {
	var out []string
	for _, state := range states {
		out = append(out, storagemarket.DealStates[state])
	}
	return out
}
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations.go
new file mode 100644
index 00000000000..0dd5d915c68
--- /dev/null
+++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations.go
@@ -0,0 +1,325 @@
package migrations

import (
	"context"
	"fmt"
	"unicode/utf8"

	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p-core/peer"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/go-address"
	versioning "github.com/filecoin-project/go-ds-versioning/pkg"
	"github.com/filecoin-project/go-ds-versioning/pkg/versioned"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin/v9/market"
	"github.com/filecoin-project/go-state-types/crypto"
	marketOld "github.com/filecoin-project/specs-actors/actors/builtin/market"

	"github.com/filecoin-project/go-fil-markets/filestore"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
)

//go:generate cbor-gen-for ClientDeal0 MinerDeal0 Balance0 SignedStorageAsk0 StorageAsk0 DataRef0 ProviderDealState0 AskRequest0 AskResponse0 Proposal0 Response0
// SignedResponse0 DealStatusRequest0 DealStatusResponse0

// NOTE(review): field order in the structs below is wire-significant —
// cbor-gen encodes them as tuples, so reordering fields would change the
// serialized format. Do not reorder.

// Balance0 is version 0 of Balance
type Balance0 struct {
	Locked    abi.TokenAmount
	Available abi.TokenAmount
}

// StorageAsk0 is version 0 of StorageAsk
type StorageAsk0 struct {
	Price         abi.TokenAmount
	VerifiedPrice abi.TokenAmount

	MinPieceSize abi.PaddedPieceSize
	MaxPieceSize abi.PaddedPieceSize
	Miner        address.Address
	Timestamp    abi.ChainEpoch
	Expiry       abi.ChainEpoch
	SeqNo        uint64
}

// SignedStorageAsk0 is version 0 of SignedStorageAsk
type SignedStorageAsk0 struct {
	Ask       *StorageAsk0
	Signature *crypto.Signature
}

// MinerDeal0 is version 0 of MinerDeal
type MinerDeal0 struct {
	marketOld.ClientDealProposal
	ProposalCid           cid.Cid
	AddFundsCid           *cid.Cid
	PublishCid            *cid.Cid
	Miner                 peer.ID
	Client                peer.ID
	State                 storagemarket.StorageDealStatus
	PiecePath             filestore.Path
	MetadataPath          filestore.Path
	SlashEpoch            abi.ChainEpoch
	FastRetrieval         bool
	Message               string
	StoreID               *uint64
	FundsReserved         abi.TokenAmount
	Ref                   *DataRef0
	AvailableForRetrieval bool

	DealID       abi.DealID
	CreationTime cbg.CborTime
}

// ClientDeal0 is version 0 of ClientDeal
type ClientDeal0 struct {
	market.ClientDealProposal
	ProposalCid    cid.Cid
	AddFundsCid    *cid.Cid
	State          storagemarket.StorageDealStatus
	Miner          peer.ID
	MinerWorker    address.Address
	DealID         abi.DealID
	DataRef        *DataRef0
	Message        string
	PublishMessage *cid.Cid
	SlashEpoch     abi.ChainEpoch
	PollRetryCount uint64
	PollErrorCount uint64
	FastRetrieval  bool
	StoreID        *uint64
	FundsReserved  abi.TokenAmount
	CreationTime   cbg.CborTime
}

// DataRef0 is version 0 of DataRef
type DataRef0 struct {
	TransferType string
	Root         cid.Cid
	PieceCid     *cid.Cid
	PieceSize    abi.UnpaddedPieceSize
}

// ProviderDealState0 is version 0 of ProviderDealState
type ProviderDealState0 struct {
	State         storagemarket.StorageDealStatus
	Message       string
	Proposal      *market.DealProposal
	ProposalCid   *cid.Cid
	AddFundsCid   *cid.Cid
	PublishCid    *cid.Cid
	DealID        abi.DealID
	FastRetrieval bool
}

// Proposal0 is version 0 of Proposal
type Proposal0 struct {
	DealProposal  *market.ClientDealProposal
	Piece         *DataRef0
	FastRetrieval bool
}

// Response0 is version 0 of Response
type Response0 struct {
	State storagemarket.StorageDealStatus

	// DealProposalRejected
	Message  string
	Proposal cid.Cid

	// StorageDealProposalAccepted
	PublishMessage *cid.Cid
}

// SignedResponse0 is version 0 of SignedResponse
type SignedResponse0 struct {
	Response  Response0
	Signature *crypto.Signature
}

// AskRequest0 is version 0 of AskRequest
type AskRequest0 struct {
	Miner address.Address
}

// AskResponse0 is version 0 of AskResponse
type AskResponse0 struct {
	Ask *SignedStorageAsk0
}

// DealStatusRequest0 is version 0 of DealStatusRequest
type DealStatusRequest0 struct {
	Proposal  cid.Cid
	Signature crypto.Signature
}

// DealStatusResponse0 is version 0 of DealStatusResponse
type DealStatusResponse0 struct {
	DealState ProviderDealState0
	Signature crypto.Signature
}

// MigrateDataRef0To1 migrates a tuple encoded data tref to a map encoded data ref
func MigrateDataRef0To1(oldDr *DataRef0) *storagemarket.DataRef {
	if oldDr == nil {
		return nil
	}
	return &storagemarket.DataRef{
		TransferType: oldDr.TransferType,
		Root:         oldDr.Root,
		PieceCid:     oldDr.PieceCid,
		PieceSize:    oldDr.PieceSize,
	}
}

// MigrateClientDeal0To1 migrates a tuple encoded client deal to a map encoded client deal
// NOTE(review): StoreID is not carried forward — presumably the new
// storagemarket.ClientDeal has no such field; confirm.
func MigrateClientDeal0To1(oldCd *ClientDeal0) (*storagemarket.ClientDeal, error) {
	return &storagemarket.ClientDeal{
		ClientDealProposal: oldCd.ClientDealProposal,
		ProposalCid:        oldCd.ProposalCid,
		AddFundsCid:        oldCd.AddFundsCid,
		State:              oldCd.State,
		Miner:              oldCd.Miner,
		MinerWorker:        oldCd.MinerWorker,
		DealID:             oldCd.DealID,
		DataRef:            MigrateDataRef0To1(oldCd.DataRef),
		Message:            oldCd.Message,
		PublishMessage:     oldCd.PublishMessage,
		SlashEpoch:         oldCd.SlashEpoch,
		PollRetryCount:     oldCd.PollRetryCount,
		PollErrorCount:     oldCd.PollErrorCount,
		FastRetrieval:      oldCd.FastRetrieval,
		FundsReserved:      oldCd.FundsReserved,
		CreationTime:       oldCd.CreationTime,
	}, nil
}

// MigrateMinerDeal0To1 migrates a tuple encoded miner deal to a map encoded miner deal
// NOTE(review): StoreID is not carried forward here either — confirm MinerDeal1
// intentionally drops it.
func MigrateMinerDeal0To1(oldCd *MinerDeal0) (*MinerDeal1, error) {
	return &MinerDeal1{
		ClientDealProposal:    oldCd.ClientDealProposal,
		ProposalCid:           oldCd.ProposalCid,
		AddFundsCid:           oldCd.AddFundsCid,
		PublishCid:            oldCd.PublishCid,
		Miner:                 oldCd.Miner,
		Client:                oldCd.Client,
		State:                 oldCd.State,
		PiecePath:             oldCd.PiecePath,
		MetadataPath:          oldCd.MetadataPath,
		SlashEpoch:            oldCd.SlashEpoch,
		FastRetrieval:         oldCd.FastRetrieval,
		Message:               oldCd.Message,
		FundsReserved:         oldCd.FundsReserved,
		Ref:                   MigrateDataRef0To1(oldCd.Ref),
		AvailableForRetrieval: oldCd.AvailableForRetrieval,
		DealID:                oldCd.DealID,
		CreationTime:          oldCd.CreationTime,
	}, nil
}

// MigrateMinerDeal1To2 migrates a miner deal label to the new format
func MigrateMinerDeal1To2(oldCd *MinerDeal1) (*storagemarket.MinerDeal, error) {
	clientDealProp, err := MigrateClientDealProposal0To1(oldCd.ClientDealProposal)
	if err != nil {
		return nil, fmt.Errorf("migrating deal with proposal cid %s: %w", oldCd.ProposalCid, err)
	}

	return &storagemarket.MinerDeal{
		ClientDealProposal:    *clientDealProp,
		ProposalCid:           oldCd.ProposalCid,
		AddFundsCid:           oldCd.AddFundsCid,
		PublishCid:            oldCd.PublishCid,
		Miner:                 oldCd.Miner,
		Client:                oldCd.Client,
		State:                 oldCd.State,
		PiecePath:             oldCd.PiecePath,
		MetadataPath:          oldCd.MetadataPath,
		SlashEpoch:            oldCd.SlashEpoch,
		FastRetrieval:         oldCd.FastRetrieval,
		Message:               oldCd.Message,
		FundsReserved:         oldCd.FundsReserved,
		Ref:                   oldCd.Ref,
		AvailableForRetrieval: oldCd.AvailableForRetrieval,
		DealID:                oldCd.DealID,
		CreationTime:          oldCd.CreationTime,
	}, nil
}

// MigrateClientDealProposal0To1 converts the old string-typed deal label into
// the new DealLabel: a valid-UTF-8 label becomes a string label, anything else
// becomes a bytes label.
func MigrateClientDealProposal0To1(prop marketOld.ClientDealProposal) (*storagemarket.ClientDealProposal, error) {
	oldLabel := prop.Proposal.Label

	var err error
	var newLabel market.DealLabel
	if utf8.ValidString(oldLabel) {
		newLabel, err = market.NewLabelFromString(oldLabel)
		if err != nil {
			return nil, fmt.Errorf("migrating deal label to DealLabel (string): %w", err)
		}
	} else {
		newLabel, err = market.NewLabelFromBytes([]byte(oldLabel))
		if err != nil {
			return nil, fmt.Errorf("migrating deal label to DealLabel (byte): %w", err)
		}
	}

	return &storagemarket.ClientDealProposal{
		ClientSignature: prop.ClientSignature,
		Proposal: market.DealProposal{
			PieceCID:             prop.Proposal.PieceCID,
			PieceSize:            prop.Proposal.PieceSize,
			VerifiedDeal:         prop.Proposal.VerifiedDeal,
			Client:               prop.Proposal.Client,
			Provider:             prop.Proposal.Provider,
			Label:                newLabel,
			StartEpoch:           prop.Proposal.StartEpoch,
			EndEpoch:             prop.Proposal.EndEpoch,
			StoragePricePerEpoch: prop.Proposal.StoragePricePerEpoch,
			ProviderCollateral:   prop.Proposal.ProviderCollateral,
			ClientCollateral:     prop.Proposal.ClientCollateral,
		},
	}, nil
}

// MigrateStorageAsk0To1 migrates a tuple encoded storage ask to a map encoded storage ask
func MigrateStorageAsk0To1(oldSa *StorageAsk0) *storagemarket.StorageAsk {
	return &storagemarket.StorageAsk{
		Price:         oldSa.Price,
		VerifiedPrice: oldSa.VerifiedPrice,

		MinPieceSize: oldSa.MinPieceSize,
		MaxPieceSize: oldSa.MaxPieceSize,
		Miner:        oldSa.Miner,
		Timestamp:    oldSa.Timestamp,
		Expiry:       oldSa.Expiry,
		SeqNo:        oldSa.SeqNo,
	}
}

// GetMigrateSignedStorageAsk0To1 returns a function that migrates a tuple encoded signed storage ask to a map encoded signed storage ask
// It needs a signing function to resign the ask -- there's no way around that
func GetMigrateSignedStorageAsk0To1(sign func(ctx context.Context, ask *storagemarket.StorageAsk) (*crypto.Signature, error)) func(*SignedStorageAsk0) (*storagemarket.SignedStorageAsk, error) {
	return func(oldSsa *SignedStorageAsk0) (*storagemarket.SignedStorageAsk, error) {
		newSa := MigrateStorageAsk0To1(oldSsa.Ask)
		sig, err := sign(context.TODO(), newSa)
		if err != nil {
			return nil, err
		}
		return &storagemarket.SignedStorageAsk{
			Ask:       newSa,
			Signature: sig,
		}, nil
	}
}

// ClientMigrations are migrations for the client's store of storage deals
var ClientMigrations = versioned.BuilderList{
	versioned.NewVersionedBuilder(MigrateClientDeal0To1, versioning.VersionKey("1")),
}

// ProviderMigrations are migrations for the providers's store of storage deals
// (the FilterKeys entries exclude the ask records, which are not miner deals)
var ProviderMigrations = versioned.BuilderList{
	versioned.NewVersionedBuilder(MigrateMinerDeal0To1, versioning.VersionKey("1")).FilterKeys([]string{
		"/latest-ask", "/storage-ask/latest", "/storage-ask/1/latest", "/storage-ask/versions/current"}),
	versioned.NewVersionedBuilder(MigrateMinerDeal1To2, versioning.VersionKey("2")).FilterKeys([]string{
		"/latest-ask", "/storage-ask/latest", "/storage-ask/1/latest", "/storage-ask/versions/current"}).OldVersion("1"),
}
diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_cbor_gen.go
new file mode 100644
index 00000000000..d8742c94074
--- /dev/null
+++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_cbor_gen.go
@@ -0,0 +1,2271 @@
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+ +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + filestore "github.com/filecoin-project/go-fil-markets/filestore" + abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufClientDeal0 = []byte{145} + +func (t *ClientDeal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufClientDeal0); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Miner (peer.ID) (string) + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.MinerWorker (address.Address) (struct) + if err := 
t.MinerWorker.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DataRef (migrations.DataRef0) (struct) + if err := t.DataRef.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.PublishMessage (cid.Cid) (struct) + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.PollRetryCount (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollRetryCount)); err != nil { + return err + } + + // t.PollErrorCount (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollErrorCount)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if 
err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ClientDeal0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDeal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 17 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Miner (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.MinerWorker (address.Address) (struct) + + { + + if err := t.MinerWorker.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWorker: %w", err) + } + + } + // t.DealID 
(abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DataRef (migrations.DataRef0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DataRef = new(DataRef0) + if err := t.DataRef.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DataRef pointer: %w", err) + } + } + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.PublishMessage (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.PollRetryCount (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollRetryCount = uint64(extra) + + } + // t.PollErrorCount (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + 
if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollErrorCount = uint64(extra) + + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.StoreID (uint64) (uint64) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.FundsReserved (big.Int) (struct) + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.CreationTime (typegen.CborTime) (struct) + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + return nil +} + +var lengthBufMinerDeal0 = []byte{146} + +func (t *MinerDeal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMinerDeal0); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + + if t.AddFundsCid == nil { + if _, err := 
cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.Miner (peer.ID) (string) + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Client))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Client)); err != nil { + return err + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.PiecePath)); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.MetadataPath)); err != 
nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.StoreID (uint64) (uint64) + + if t.StoreID == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.Ref (migrations.DataRef0) (struct) + if err := t.Ref.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *MinerDeal0) UnmarshalCBOR(r io.Reader) (err error) { + *t = MinerDeal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input 
should be of type array") + } + + if extra != 18 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.Miner (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.Client (peer.ID) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.PiecePath (filestore.Path) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.MetadataPath (filestore.Path) (string) + + { + sval, err := cbg.ReadString(cr) + if 
err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.StoreID (uint64) (uint64) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := uint64(extra) + t.StoreID = &typed + } + + } + // t.FundsReserved (big.Int) (struct) + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.Ref (migrations.DataRef0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ref = new(DataRef0) + if err := t.Ref.UnmarshalCBOR(cr); err != nil { + 
return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } + + } + // t.AvailableForRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.CreationTime (typegen.CborTime) (struct) + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + return nil +} + +var lengthBufBalance0 = []byte{130} + +func (t *Balance0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufBalance0); err != nil { + return err + } + + // t.Locked (big.Int) (struct) + if err := t.Locked.MarshalCBOR(cw); err != nil { + return err + } + + // t.Available (big.Int) (struct) + if err := t.Available.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Balance0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Balance0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Locked (big.Int) (struct) + + { + + if err := t.Locked.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling 
t.Locked: %w", err) + } + + } + // t.Available (big.Int) (struct) + + { + + if err := t.Available.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Available: %w", err) + } + + } + return nil +} + +var lengthBufSignedStorageAsk0 = []byte{130} + +func (t *SignedStorageAsk0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufSignedStorageAsk0); err != nil { + return err + } + + // t.Ask (migrations.StorageAsk0) (struct) + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedStorageAsk0) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedStorageAsk0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Ask (migrations.StorageAsk0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(StorageAsk0) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufStorageAsk0 = 
[]byte{136} + +func (t *StorageAsk0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufStorageAsk0); err != nil { + return err + } + + // t.Price (big.Int) (struct) + if err := t.Price.MarshalCBOR(cw); err != nil { + return err + } + + // t.VerifiedPrice (big.Int) (struct) + if err := t.VerifiedPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MinPieceSize)); err != nil { + return err + } + + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPieceSize)); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + + // t.Timestamp (abi.ChainEpoch) (int64) + if t.Timestamp >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + + // t.Expiry (abi.ChainEpoch) (int64) + if t.Expiry >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Expiry)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Expiry-1)); err != nil { + return err + } + } + + // t.SeqNo (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SeqNo)); err != nil { + return err + } + + return nil +} + +func (t *StorageAsk0) UnmarshalCBOR(r io.Reader) (err error) { + *t = StorageAsk0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of 
type array") + } + + if extra != 8 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Price (big.Int) (struct) + + { + + if err := t.Price.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Price: %w", err) + } + + } + // t.VerifiedPrice (big.Int) (struct) + + { + + if err := t.VerifiedPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedPrice: %w", err) + } + + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MinPieceSize = abi.PaddedPieceSize(extra) + + } + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPieceSize = abi.PaddedPieceSize(extra) + + } + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Timestamp (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = abi.ChainEpoch(extraI) + } + // t.Expiry (abi.ChainEpoch) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = 
int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiry = abi.ChainEpoch(extraI) + } + // t.SeqNo (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SeqNo = uint64(extra) + + } + return nil +} + +var lengthBufDataRef0 = []byte{132} + +func (t *DataRef0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDataRef0); err != nil { + return err + } + + // t.TransferType (string) (string) + if len(t.TransferType) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.TransferType was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TransferType))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.TransferType)); err != nil { + return err + } + + // t.Root (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Root); err != nil { + return xerrors.Errorf("failed to write cid field t.Root: %w", err) + } + + // t.PieceCid (cid.Cid) (struct) + + if t.PieceCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCid: %w", err) + } + } + + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { + return err + } + + return nil +} + +func (t *DataRef0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DataRef0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != 
cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.TransferType (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.TransferType = string(sval) + } + // t.Root (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Root: %w", err) + } + + t.Root = c + + } + // t.PieceCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) + } + + t.PieceCid = &c + } + + } + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceSize = abi.UnpaddedPieceSize(extra) + + } + return nil +} + +var lengthBufProviderDealState0 = []byte{136} + +func (t *ProviderDealState0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufProviderDealState0); err != nil { + return err + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.Proposal (market.DealProposal) (struct) + if err := t.Proposal.MarshalCBOR(cw); err != nil { + 
return err + } + + // t.ProposalCid (cid.Cid) (struct) + + if t.ProposalCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + } + + // t.AddFundsCid (cid.Cid) (struct) + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealID (abi.DealID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState0) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 8 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + 
// t.Proposal (market.DealProposal) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Proposal = new(market.DealProposal) + if err := t.Proposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Proposal pointer: %w", err) + } + } + + } + // t.ProposalCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = &c + } + + } + // t.AddFundsCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealID (abi.DealID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans 
are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufAskRequest0 = []byte{129} + +func (t *AskRequest0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufAskRequest0); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskRequest0) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskRequest0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + return nil +} + +var lengthBufAskResponse0 = []byte{129} + +func (t *AskResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufAskResponse0); err != nil { + return err + } + + // t.Ask (migrations.SignedStorageAsk0) (struct) + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Ask 
(migrations.SignedStorageAsk0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(SignedStorageAsk0) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufProposal0 = []byte{131} + +func (t *Proposal0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufProposal0); err != nil { + return err + } + + // t.DealProposal (market.ClientDealProposal) (struct) + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.Piece (migrations.DataRef0) (struct) + if err := t.Piece.MarshalCBOR(cw); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *Proposal0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Proposal0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealProposal (market.ClientDealProposal) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.ClientDealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.Piece (migrations.DataRef0) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != 
cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Piece = new(DataRef0) + if err := t.Piece.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Piece pointer: %w", err) + } + } + + } + // t.FastRetrieval (bool) (bool) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufResponse0 = []byte{132} + +func (t *Response0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufResponse0); err != nil { + return err + } + + // t.State (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.PublishMessage (cid.Cid) (struct) + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + return nil +} + +func (t *Response0) UnmarshalCBOR(r io.Reader) (err error) { + *t = Response0{} + + cr := cbg.NewCborReader(r) + + maj, 
extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.State (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.PublishMessage (cid.Cid) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + return nil +} + +var lengthBufSignedResponse0 = []byte{130} + +func (t *SignedResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufSignedResponse0); err != nil { + return err + } + + // t.Response (migrations.Response0) (struct) + if err := t.Response.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + 
} + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Response (migrations.Response0) (struct) + + { + + if err := t.Response.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Response: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufDealStatusRequest0 = []byte{130} + +func (t *DealStatusRequest0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealStatusRequest0); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusRequest0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusRequest0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Proposal (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return 
xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.Signature (crypto.Signature) (struct) + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + return nil +} + +var lengthBufDealStatusResponse0 = []byte{130} + +func (t *DealStatusResponse0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufDealStatusResponse0); err != nil { + return err + } + + // t.DealState (migrations.ProviderDealState0) (struct) + if err := t.DealState.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusResponse0) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusResponse0{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealState (migrations.ProviderDealState0) (struct) + + { + + if err := t.DealState.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealState: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_mapenc_types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_mapenc_types.go new file mode 100644 index 00000000000..983d67f7bc2 --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_mapenc_types.go @@ -0,0 +1,55 @@ +package migrations + +import ( + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + marketOld "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// Some of the types in the migrations file are CBOR array-encoded, and some +// are map-encoded. The --map-encoding parameter must be specified in a +// generate directive in a separate file. So we define CBOR map-encoded types +// in this file + +//go:generate cbor-gen-for --map-encoding Proposal1 MinerDeal1 + +// Proposal1 is version 1 of Proposal (used by deal proposal protocol v1.1.0) +type Proposal1 struct { + DealProposal *marketOld.ClientDealProposal + Piece *storagemarket.DataRef + FastRetrieval bool +} + +// MinerDeal1 is version 1 of MinerDeal +type MinerDeal1 struct { + marketOld.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client peer.ID + State storagemarket.StorageDealStatus + PiecePath filestore.Path + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + FundsReserved abi.TokenAmount + Ref *storagemarket.DataRef + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime + + TransferChannelId *datatransfer.ChannelID + SectorNumber abi.SectorNumber + + InboundCAR string +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_mapenc_types_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_mapenc_types_cbor_gen.go new file mode 100644 index 00000000000..edb4e7197f1 --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/migrations/migrations_mapenc_types_cbor_gen.go @@ -0,0 +1,936 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package migrations + +import ( + "fmt" + "io" + "math" + "sort" + + datatransfer "github.com/filecoin-project/go-data-transfer" + filestore "github.com/filecoin-project/go-fil-markets/filestore" + storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" + abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/specs-actors/actors/builtin/market" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *Proposal1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.DealProposal (market.ClientDealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.Piece (storagemarket.DataRef) (struct) + if len("Piece") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Piece\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Piece"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Piece")); err != nil { + return err + } + + if err := t.Piece.MarshalCBOR(cw); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > 
cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *Proposal1) UnmarshalCBOR(r io.Reader) (err error) { + *t = Proposal1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Proposal1: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (market.ClientDealProposal) (struct) + case "DealProposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.ClientDealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.Piece (storagemarket.DataRef) (struct) + case "Piece": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Piece = new(storagemarket.DataRef) + if err := t.Piece.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Piece pointer: %w", err) + } + } + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj 
!= cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *MinerDeal1) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{180}); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid 
== nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len("Client") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Client\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Client"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Client")); err != nil { + return err + } + + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Client))); err != nil { 
+ return err + } + if _, err := io.WriteString(w, string(t.Client)); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len("PiecePath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PiecePath\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PiecePath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PiecePath")); err != nil { + return err + } + + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.PiecePath)); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len("MetadataPath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MetadataPath\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MetadataPath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MetadataPath")); err != nil { + return err + } + + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.MetadataPath)); err != nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReserved")); err != nil { + return err + } + + if err := 
t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.Ref (storagemarket.DataRef) (struct) + if len("Ref") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ref\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ref"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ref")); err != nil { + return err + } + + if err := t.Ref.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if len("AvailableForRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AvailableForRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AvailableForRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AvailableForRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.TransferChannelId (datatransfer.ChannelID) (struct) + if len("TransferChannelId") > cbg.MaxLength { + return xerrors.Errorf("Value in field 
\"TransferChannelId\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferChannelId"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferChannelId")); err != nil { + return err + } + + if err := t.TransferChannelId.MarshalCBOR(cw); err != nil { + return err + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + if len("SectorNumber") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorNumber\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorNumber"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SectorNumber")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.InboundCAR (string) (string) + if len("InboundCAR") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"InboundCAR\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("InboundCAR"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("InboundCAR")); err != nil { + return err + } + + if len(t.InboundCAR) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.InboundCAR was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.InboundCAR))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.InboundCAR)); err != nil { + return err + } + return nil +} + +func (t *MinerDeal1) UnmarshalCBOR(r io.Reader) (err error) { + *t = MinerDeal1{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("MinerDeal1: map struct too large (%d)", extra) + } + + var name string + n := extra 
+ + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.Miner (peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.Client (peer.ID) (string) + case "Client": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.PiecePath (filestore.Path) (string) + case "PiecePath": + + { 
+ sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.MetadataPath (filestore.Path) (string) + case "MetadataPath": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.Ref (storagemarket.DataRef) (struct) + case "Ref": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ref = new(storagemarket.DataRef) + if err := t.Ref.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + 
} + } + + } + // t.AvailableForRetrieval (bool) (bool) + case "AvailableForRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.TransferChannelId (datatransfer.ChannelID) (struct) + case "TransferChannelId": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.TransferChannelId = new(datatransfer.ChannelID) + if err := t.TransferChannelId.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelId pointer: %w", err) + } + } + + } + // t.SectorNumber (abi.SectorNumber) (uint64) + case "SectorNumber": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.InboundCAR (string) (string) + case "InboundCAR": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.InboundCAR = string(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git 
a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/ask_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/ask_stream.go new file mode 100644 index 00000000000..eed302e7c3f --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/ask_stream.go @@ -0,0 +1,58 @@ +package network + +import ( + "bufio" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" +) + +type askStream struct { + p peer.ID + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ StorageAskStream = (*askStream)(nil) + +func (as *askStream) ReadAskRequest() (AskRequest, error) { + var a AskRequest + + if err := a.UnmarshalCBOR(as.buffered); err != nil { + log.Warn(err) + return AskRequestUndefined, err + + } + + return a, nil +} + +func (as *askStream) WriteAskRequest(q AskRequest) error { + return cborutil.WriteCborRPC(as.rw, &q) +} + +func (as *askStream) ReadAskResponse() (AskResponse, []byte, error) { + var resp AskResponse + + if err := resp.UnmarshalCBOR(as.buffered); err != nil { + log.Warn(err) + return AskResponseUndefined, nil, err + } + + origBytes, err := cborutil.Dump(resp.Ask.Ask) + if err != nil { + log.Warn(err) + return AskResponseUndefined, nil, err + } + return resp, origBytes, nil +} + +func (as *askStream) WriteAskResponse(qr AskResponse, _ ResigningFunc) error { + return cborutil.WriteCborRPC(as.rw, &qr) +} + +func (as *askStream) Close() error { + return as.rw.Close() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_status_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_status_stream.go new file mode 100644 index 00000000000..e23a46cf9b2 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_status_stream.go @@ -0,0 +1,60 @@ +package network + +import ( + "bufio" + + "github.com/libp2p/go-libp2p-core/host" + 
"github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" +) + +type dealStatusStream struct { + p peer.ID + host host.Host + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ DealStatusStream = (*dealStatusStream)(nil) + +func (d *dealStatusStream) ReadDealStatusRequest() (DealStatusRequest, error) { + var q DealStatusRequest + + if err := q.UnmarshalCBOR(d.buffered); err != nil { + log.Warn(err) + return DealStatusRequestUndefined, err + } + return q, nil +} + +func (d *dealStatusStream) WriteDealStatusRequest(q DealStatusRequest) error { + return cborutil.WriteCborRPC(d.rw, &q) +} + +func (d *dealStatusStream) ReadDealStatusResponse() (DealStatusResponse, []byte, error) { + var qr DealStatusResponse + + if err := qr.UnmarshalCBOR(d.buffered); err != nil { + return DealStatusResponseUndefined, nil, err + } + + origBytes, err := cborutil.Dump(&qr.DealState) + if err != nil { + return DealStatusResponseUndefined, nil, err + } + return qr, origBytes, nil +} + +func (d *dealStatusStream) WriteDealStatusResponse(qr DealStatusResponse, _ ResigningFunc) error { + return cborutil.WriteCborRPC(d.rw, &qr) +} + +func (d *dealStatusStream) Close() error { + return d.rw.Close() +} + +func (d *dealStatusStream) RemotePeer() peer.ID { + return d.p +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream.go new file mode 100644 index 00000000000..00bb265114d --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream.go @@ -0,0 +1,63 @@ +package network + +import ( + "bufio" + + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" +) + +// TagPriority is the priority for deal streams -- they should generally be preserved 
above all else +const TagPriority = 100 + +type dealStreamv111 struct { + p peer.ID + host host.Host + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ StorageDealStream = (*dealStreamv111)(nil) + +func (d *dealStreamv111) ReadDealProposal() (Proposal, error) { + var ds Proposal + + if err := ds.UnmarshalCBOR(d.buffered); err != nil { + log.Warn(err) + return ProposalUndefined, err + } + + return ds, nil +} + +func (d *dealStreamv111) WriteDealProposal(dp Proposal) error { + return cborutil.WriteCborRPC(d.rw, &dp) +} + +func (d *dealStreamv111) ReadDealResponse() (SignedResponse, []byte, error) { + var dr SignedResponse + + if err := dr.UnmarshalCBOR(d.buffered); err != nil { + return SignedResponseUndefined, nil, err + } + origBytes, err := cborutil.Dump(&dr.Response) + if err != nil { + return SignedResponseUndefined, nil, err + } + return dr, origBytes, nil +} + +func (d *dealStreamv111) WriteDealResponse(dr SignedResponse, _ ResigningFunc) error { + return cborutil.WriteCborRPC(d.rw, &dr) +} + +func (d *dealStreamv111) Close() error { + return d.rw.Close() +} + +func (d *dealStreamv111) RemotePeer() peer.ID { + return d.p +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream_v110.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream_v110.go new file mode 100644 index 00000000000..d9cc3c4defa --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream_v110.go @@ -0,0 +1,94 @@ +package network + +import ( + "bufio" + "encoding/hex" + "fmt" + "unicode/utf8" + + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" +) + +type dealStreamv110 struct { + p peer.ID + host host.Host + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ StorageDealStream = 
(*dealStreamv110)(nil) + +func (d *dealStreamv110) ReadDealProposal() (Proposal, error) { + var ds migrations.Proposal1 + + if err := ds.UnmarshalCBOR(d.buffered); err != nil { + err = fmt.Errorf("unmarshalling v110 deal proposal: %w", err) + log.Warnf(err.Error()) + return ProposalUndefined, err + } + + // The signature over the deal proposal will be different between a v1.1.0 + // deal proposal and higher versions if the deal label cannot be parsed as + // a utf8 string. + // The signature is checked when submitting the Publish Storage Deals + // message, so we reject the deal proposal here to avoid that scenario. + if err := checkDealLabel(ds.DealProposal.Proposal.Label); err != nil { + return ProposalUndefined, err + } + + // Migrate the deal proposal to the new format + prop, err := migrations.MigrateClientDealProposal0To1(*ds.DealProposal) + if err != nil { + err = fmt.Errorf("migrating v110 deal proposal to current version: %w", err) + log.Warnf(err.Error()) + return ProposalUndefined, err + } + return Proposal{ + DealProposal: prop, + Piece: ds.Piece, + FastRetrieval: ds.FastRetrieval, + }, nil +} + +func checkDealLabel(label string) error { + labelBytes := []byte(label) + if !utf8.Valid(labelBytes) { + return fmt.Errorf("cannot parse deal label 0x%s as string", hex.EncodeToString(labelBytes)) + } + return nil +} + +func (d *dealStreamv110) WriteDealProposal(dp Proposal) error { + return cborutil.WriteCborRPC(d.rw, &dp) +} + +func (d *dealStreamv110) ReadDealResponse() (SignedResponse, []byte, error) { + var dr SignedResponse + + if err := dr.UnmarshalCBOR(d.buffered); err != nil { + return SignedResponseUndefined, nil, err + } + origBytes, err := cborutil.Dump(&dr.Response) + if err != nil { + return SignedResponseUndefined, nil, err + } + return dr, origBytes, nil +} + +func (d *dealStreamv110) WriteDealResponse(dr SignedResponse, _ ResigningFunc) error { + return cborutil.WriteCborRPC(d.rw, &dr) +} + +func (d *dealStreamv110) Close() error { + return 
d.rw.Close() +} + +func (d *dealStreamv110) RemotePeer() peer.ID { + return d.p +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream_v110_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream_v110_test.go new file mode 100644 index 00000000000..8e6c5613fbe --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/deal_stream_v110_test.go @@ -0,0 +1,164 @@ +package network + +import ( + "bufio" + "bytes" + "testing" + "unicode/utf8" + + "github.com/ipfs/go-cid" + "github.com/polydawn/refmt/cbor" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + marketOld "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" +) + +// TestReceivev110DealProposal verifies that the provider will reject a v110 +// deal that has a non-utf8 deal label +func TestReceivev110DealProposal(t *testing.T) { + runTest := func(label string, validUtf8 bool) { + dp := makeOldDealProposal() + dp.Proposal.Label = label + + dpReq := migrations.Proposal1{ + DealProposal: &dp, + } + + var buff bytes.Buffer + err := dpReq.MarshalCBOR(&buff) + require.NoError(t, err) + + ds := &dealStreamv110{ + buffered: bufio.NewReader(&buff), + } + prop, err := ds.ReadDealProposal() + if validUtf8 { + require.NoError(t, err) + require.True(t, prop.DealProposal.Proposal.Label.IsString()) + } else { + require.Error(t, err) + } + } + + t.Run("empty label", func(t *testing.T) { + runTest("", true) + }) + t.Run("string label", func(t *testing.T) { + runTest("label", true) + }) + t.Run("byte label", func(t *testing.T) { + label := []byte{66, 250} + require.False(t, utf8.Valid(label)) + 
runTest(string(label), false) + }) +} + +func TestDealLabelCheck(t *testing.T) { + err := checkDealLabel("") + require.NoError(t, err) + err = checkDealLabel("label") + require.NoError(t, err) + err = checkDealLabel(string([]byte{66, 250})) + require.Error(t, err) +} + +// Expect that CBOR marshaling a string will give the same result as marshaling +// a DealLabel with that string. +func TestLabelMatchingString(t *testing.T) { + str := "testing" + marshaledStr, err := cbor.Marshal(str) + require.NoError(t, err) + + l, err := market.NewLabelFromString(str) + require.NoError(t, err) + var marshaledLabel bytes.Buffer + err = l.MarshalCBOR(&marshaledLabel) + require.NoError(t, err) + + require.Equal(t, marshaledLabel.Bytes(), marshaledStr) +} + +// Expect that CBOR marshaling a string with bytes that are not valid utf8 +// will give a different result than marshaling a DealLabel with those bytes. +func TestLabelMatchingBytes(t *testing.T) { + bz := []byte{66, 250} + require.False(t, utf8.Valid(bz)) + marshaledStr, err := cbor.Marshal(string(bz)) + require.NoError(t, err) + + l, err := market.NewLabelFromBytes(bz) + require.NoError(t, err) + var marshaledLabelFromBytes bytes.Buffer + err = l.MarshalCBOR(&marshaledLabelFromBytes) + require.NoError(t, err) + + require.NotEqual(t, marshaledLabelFromBytes.Bytes(), marshaledStr) +} + +// TestSignedProposalCidMatching verifies that the ipld-marshaled signed deal +// proposal cid matches between the old deal proposal format and the new one +// for strings, but not for non-utf8 bytes +func TestSignedProposalCidMatching(t *testing.T) { + runTest := func(label string, expectEqual bool) { + oldDealProp := makeOldDealProposal() + oldDealProp.Proposal.Label = label + oldDealPropNd, err := cborutil.AsIpld(&oldDealProp) + require.NoError(t, err) + + //t.Logf("testing label %s", oldDealProp.Proposal.Label) + + newDealProp, err := migrations.MigrateClientDealProposal0To1(oldDealProp) + require.NoError(t, err) + newDealPropNd, err := 
cborutil.AsIpld(newDealProp) + require.NoError(t, err) + + require.Equal(t, expectEqual, oldDealPropNd.Cid() == newDealPropNd.Cid()) + } + + t.Run("empty label", func(t *testing.T) { + runTest("", true) + }) + t.Run("string label", func(t *testing.T) { + runTest("label", true) + }) + t.Run("byte label", func(t *testing.T) { + label := []byte{66, 250} + require.False(t, utf8.Valid(label)) + runTest(string(label), false) + }) +} + +func makeOldDealProposal() marketOld.ClientDealProposal { + pieceCid, err := cid.Parse("bafkqaaa") + if err != nil { + panic(err) + } + return marketOld.ClientDealProposal{ + Proposal: marketOld.DealProposal{ + PieceCID: pieceCid, + PieceSize: abi.PaddedPieceSize(2048), + + Client: address.TestAddress, + Provider: address.TestAddress2, + Label: "label", + + StartEpoch: abi.ChainEpoch(10), + EndEpoch: abi.ChainEpoch(20), + + StoragePricePerEpoch: abi.NewTokenAmount(1), + ProviderCollateral: abi.NewTokenAmount(2), + ClientCollateral: abi.NewTokenAmount(3), + }, + ClientSignature: crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: []byte("signature data"), + }, + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/doc.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/doc.go new file mode 100644 index 00000000000..84a589da4d4 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/doc.go @@ -0,0 +1,11 @@ +/* +Package network providers an abstraction over a libp2p host for managing storage markets's Libp2p protocols: + +network.go - defines the interfaces that must be implemented to serve as a storage network layer +deal_stream.go - implements the `StorageDealStream` interface, a data stream for proposing storage deals +ask_stream.go - implements the `StorageAskStream` interface, a data stream for querying provider asks +deal_status_stream.go - implements the `StorageDealStatusStream` interface, a data stream for querying for deal status +libp2p_impl.go - provides the 
production implementation of the `StorageMarketNetwork` interface. +types.go - types for messages sent on the storage market libp2p protocols +*/ +package network diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_ask_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_ask_stream.go new file mode 100644 index 00000000000..e983ebe6da2 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_ask_stream.go @@ -0,0 +1,92 @@ +package network + +import ( + "bufio" + "context" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" +) + +type legacyAskStream struct { + p peer.ID + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ StorageAskStream = (*legacyAskStream)(nil) + +func (as *legacyAskStream) ReadAskRequest() (AskRequest, error) { + var a migrations.AskRequest0 + + if err := a.UnmarshalCBOR(as.buffered); err != nil { + log.Warn(err) + return AskRequestUndefined, err + + } + + return AskRequest{ + Miner: a.Miner, + }, nil +} + +func (as *legacyAskStream) WriteAskRequest(q AskRequest) error { + oldQ := migrations.AskRequest0{ + Miner: q.Miner, + } + return cborutil.WriteCborRPC(as.rw, &oldQ) +} + +func (as *legacyAskStream) ReadAskResponse() (AskResponse, []byte, error) { + var resp migrations.AskResponse0 + + if err := resp.UnmarshalCBOR(as.buffered); err != nil { + log.Warn(err) + return AskResponseUndefined, nil, err + } + + origBytes, err := cborutil.Dump(resp.Ask.Ask) + if err != nil { + log.Warn(err) + return AskResponseUndefined, nil, err + } + return AskResponse{ + Ask: &storagemarket.SignedStorageAsk{ + Ask: migrations.MigrateStorageAsk0To1(resp.Ask.Ask), + Signature: resp.Ask.Signature, + }, + }, origBytes, nil +} + +func (as 
*legacyAskStream) WriteAskResponse(qr AskResponse, resign ResigningFunc) error { + newAsk := qr.Ask.Ask + oldAsk := &migrations.StorageAsk0{ + Price: newAsk.Price, + VerifiedPrice: newAsk.VerifiedPrice, + MinPieceSize: newAsk.MinPieceSize, + MaxPieceSize: newAsk.MaxPieceSize, + Miner: newAsk.Miner, + Timestamp: newAsk.Timestamp, + Expiry: newAsk.Expiry, + SeqNo: newAsk.SeqNo, + } + oldSig, err := resign(context.TODO(), oldAsk) + if err != nil { + return err + } + return cborutil.WriteCborRPC(as.rw, &migrations.AskResponse0{ + Ask: &migrations.SignedStorageAsk0{ + Ask: oldAsk, + Signature: oldSig, + }, + }) +} + +func (as *legacyAskStream) Close() error { + return as.rw.Close() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_deal_status_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_deal_status_stream.go new file mode 100644 index 00000000000..a793a8eaacf --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_deal_status_stream.go @@ -0,0 +1,99 @@ +package network + +import ( + "bufio" + "context" + + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" +) + +type legacyDealStatusStream struct { + p peer.ID + host host.Host + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ DealStatusStream = (*legacyDealStatusStream)(nil) + +func (d *legacyDealStatusStream) ReadDealStatusRequest() (DealStatusRequest, error) { + var q migrations.DealStatusRequest0 + + if err := q.UnmarshalCBOR(d.buffered); err != nil { + log.Warn(err) + return DealStatusRequestUndefined, err + } + return DealStatusRequest{ + Proposal: q.Proposal, + Signature: q.Signature, + }, nil +} + +func (d *legacyDealStatusStream) 
WriteDealStatusRequest(q DealStatusRequest) error { + return cborutil.WriteCborRPC(d.rw, &migrations.DealStatusRequest0{ + Proposal: q.Proposal, + Signature: q.Signature, + }) +} + +func (d *legacyDealStatusStream) ReadDealStatusResponse() (DealStatusResponse, []byte, error) { + var qr migrations.DealStatusResponse0 + + if err := qr.UnmarshalCBOR(d.buffered); err != nil { + return DealStatusResponseUndefined, nil, err + } + + origBytes, err := cborutil.Dump(&qr.DealState) + if err != nil { + return DealStatusResponseUndefined, nil, err + } + return DealStatusResponse{ + DealState: storagemarket.ProviderDealState{ + State: qr.DealState.State, + Message: qr.DealState.Message, + Proposal: qr.DealState.Proposal, + ProposalCid: qr.DealState.ProposalCid, + AddFundsCid: qr.DealState.AddFundsCid, + PublishCid: qr.DealState.PublishCid, + DealID: qr.DealState.DealID, + FastRetrieval: qr.DealState.FastRetrieval, + }, + Signature: qr.Signature, + }, origBytes, nil +} + +func (d *legacyDealStatusStream) WriteDealStatusResponse(qr DealStatusResponse, resign ResigningFunc) error { + oldDs := migrations.ProviderDealState0{ + State: qr.DealState.State, + Message: qr.DealState.Message, + Proposal: qr.DealState.Proposal, + ProposalCid: qr.DealState.ProposalCid, + AddFundsCid: qr.DealState.AddFundsCid, + PublishCid: qr.DealState.PublishCid, + DealID: qr.DealState.DealID, + FastRetrieval: qr.DealState.FastRetrieval, + } + oldSig, err := resign(context.TODO(), &oldDs) + if err != nil { + return err + } + return cborutil.WriteCborRPC(d.rw, &migrations.DealStatusResponse0{ + DealState: oldDs, + Signature: *oldSig, + }) +} + +func (d *legacyDealStatusStream) Close() error { + return d.rw.Close() +} + +func (d *legacyDealStatusStream) RemotePeer() peer.ID { + return d.p +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_deal_stream.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_deal_stream.go new file mode 100644 index 
00000000000..8f4376bbb65 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/legacy_deal_stream.go @@ -0,0 +1,101 @@ +package network + +import ( + "bufio" + "context" + + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/go-fil-markets/storagemarket/migrations" +) + +type dealStreamv101 struct { + p peer.ID + host host.Host + rw network.MuxedStream + buffered *bufio.Reader +} + +var _ StorageDealStream = (*dealStreamv101)(nil) + +func (d *dealStreamv101) ReadDealProposal() (Proposal, error) { + var ds migrations.Proposal0 + + if err := ds.UnmarshalCBOR(d.buffered); err != nil { + log.Warn(err) + return ProposalUndefined, err + } + + return Proposal{ + DealProposal: ds.DealProposal, + Piece: migrations.MigrateDataRef0To1(ds.Piece), + FastRetrieval: ds.FastRetrieval, + }, nil +} + +func (d *dealStreamv101) WriteDealProposal(dp Proposal) error { + var piece *migrations.DataRef0 + if dp.Piece != nil { + piece = &migrations.DataRef0{ + TransferType: dp.Piece.TransferType, + Root: dp.Piece.Root, + PieceCid: dp.Piece.PieceCid, + PieceSize: dp.Piece.PieceSize, + } + } + return cborutil.WriteCborRPC(d.rw, &migrations.Proposal0{ + DealProposal: dp.DealProposal, + Piece: piece, + FastRetrieval: dp.FastRetrieval, + }) +} + +func (d *dealStreamv101) ReadDealResponse() (SignedResponse, []byte, error) { + var dr migrations.SignedResponse0 + + if err := dr.UnmarshalCBOR(d.buffered); err != nil { + return SignedResponseUndefined, nil, err + } + origBytes, err := cborutil.Dump(&dr.Response) + if err != nil { + return SignedResponseUndefined, nil, err + } + return SignedResponse{ + Response: Response{ + State: dr.Response.State, + Message: dr.Response.Message, + Proposal: dr.Response.Proposal, + PublishMessage: dr.Response.PublishMessage, + }, + Signature: dr.Signature, + }, origBytes, nil +} 
+ +func (d *dealStreamv101) WriteDealResponse(dr SignedResponse, resign ResigningFunc) error { + oldResponse := migrations.Response0{ + State: dr.Response.State, + Message: dr.Response.Message, + Proposal: dr.Response.Proposal, + PublishMessage: dr.Response.PublishMessage, + } + oldSig, err := resign(context.TODO(), &oldResponse) + if err != nil { + return err + } + return cborutil.WriteCborRPC(d.rw, &migrations.SignedResponse0{ + Response: oldResponse, + Signature: oldSig, + }) +} + +func (d *dealStreamv101) Close() error { + return d.rw.Close() +} + +func (d *dealStreamv101) RemotePeer() peer.ID { + return d.p +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/libp2p_impl.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/libp2p_impl.go new file mode 100644 index 00000000000..60ddbe29403 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/libp2p_impl.go @@ -0,0 +1,224 @@ +package network + +import ( + "bufio" + "context" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +var log = logging.Logger("storagemarket_network") + +// Option is an option for configuring the libp2p storage market network +type Option func(*libp2pStorageMarketNetwork) + +// RetryParameters changes the default parameters around connection reopening +func RetryParameters(minDuration time.Duration, maxDuration time.Duration, attempts float64, backoffFactor float64) Option { + return func(impl *libp2pStorageMarketNetwork) { + impl.retryStream.SetOptions(shared.RetryParameters(minDuration, maxDuration, attempts, backoffFactor)) + } +} + +// SupportedAskProtocols sets what ask protocols this 
network instances listens on +func SupportedAskProtocols(supportedProtocols []protocol.ID) Option { + return func(impl *libp2pStorageMarketNetwork) { + impl.supportedAskProtocols = supportedProtocols + } +} + +// SupportedDealProtocols sets what deal protocols this network instances listens on +func SupportedDealProtocols(supportedProtocols []protocol.ID) Option { + return func(impl *libp2pStorageMarketNetwork) { + impl.supportedDealProtocols = supportedProtocols + } +} + +// SupportedDealStatusProtocols sets what deal status protocols this network instances listens on +func SupportedDealStatusProtocols(supportedProtocols []protocol.ID) Option { + return func(impl *libp2pStorageMarketNetwork) { + impl.supportedDealStatusProtocols = supportedProtocols + } +} + +// NewFromLibp2pHost builds a storage market network on top of libp2p +func NewFromLibp2pHost(h host.Host, options ...Option) StorageMarketNetwork { + impl := &libp2pStorageMarketNetwork{ + host: h, + retryStream: shared.NewRetryStream(h), + supportedAskProtocols: []protocol.ID{ + storagemarket.AskProtocolID, + storagemarket.OldAskProtocolID, + }, + supportedDealProtocols: []protocol.ID{ + storagemarket.DealProtocolID111, + storagemarket.DealProtocolID110, + storagemarket.DealProtocolID101, + }, + supportedDealStatusProtocols: []protocol.ID{ + storagemarket.DealStatusProtocolID, + storagemarket.OldDealStatusProtocolID, + }, + } + for _, option := range options { + option(impl) + } + return impl +} + +// libp2pStorageMarketNetwork transforms the libp2p host interface, which sends and receives +// NetMessage objects, into the graphsync network interface. 
+type libp2pStorageMarketNetwork struct { + host host.Host + retryStream *shared.RetryStream + // inbound messages from the network are forwarded to the receiver + receiver StorageReceiver + supportedAskProtocols []protocol.ID + supportedDealProtocols []protocol.ID + supportedDealStatusProtocols []protocol.ID +} + +func (impl *libp2pStorageMarketNetwork) NewAskStream(ctx context.Context, id peer.ID) (StorageAskStream, error) { + s, err := impl.retryStream.OpenStream(ctx, id, impl.supportedAskProtocols) + if err != nil { + log.Warn(err) + return nil, err + } + buffered := bufio.NewReaderSize(s, 16) + if s.Protocol() == storagemarket.OldAskProtocolID { + return &legacyAskStream{p: id, rw: s, buffered: buffered}, nil + } + return &askStream{p: id, rw: s, buffered: buffered}, nil +} + +func (impl *libp2pStorageMarketNetwork) NewDealStream(ctx context.Context, id peer.ID) (StorageDealStream, error) { + s, err := impl.retryStream.OpenStream(ctx, id, impl.supportedDealProtocols) + if err != nil { + return nil, err + } + buffered := bufio.NewReaderSize(s, 16) + switch s.Protocol() { + case storagemarket.DealProtocolID101: + return &dealStreamv101{p: id, rw: s, buffered: buffered, host: impl.host}, nil + case storagemarket.DealProtocolID110: + return &dealStreamv110{p: id, rw: s, buffered: buffered, host: impl.host}, nil + default: + return &dealStreamv111{p: id, rw: s, buffered: buffered, host: impl.host}, nil + } +} + +func (impl *libp2pStorageMarketNetwork) NewDealStatusStream(ctx context.Context, id peer.ID) (DealStatusStream, error) { + s, err := impl.retryStream.OpenStream(ctx, id, impl.supportedDealStatusProtocols) + if err != nil { + log.Warn(err) + return nil, err + } + buffered := bufio.NewReaderSize(s, 16) + if s.Protocol() == storagemarket.OldDealStatusProtocolID { + return &legacyDealStatusStream{p: id, rw: s, buffered: buffered}, nil + } + return &dealStatusStream{p: id, rw: s, buffered: buffered}, nil +} + +func (impl *libp2pStorageMarketNetwork) 
SetDelegate(r StorageReceiver) error { + impl.receiver = r + for _, proto := range impl.supportedAskProtocols { + impl.host.SetStreamHandler(proto, impl.handleNewAskStream) + } + for _, proto := range impl.supportedDealProtocols { + impl.host.SetStreamHandler(proto, impl.handleNewDealStream) + } + for _, proto := range impl.supportedDealStatusProtocols { + impl.host.SetStreamHandler(proto, impl.handleNewDealStatusStream) + } + return nil +} + +func (impl *libp2pStorageMarketNetwork) StopHandlingRequests() error { + impl.receiver = nil + for _, proto := range impl.supportedAskProtocols { + impl.host.RemoveStreamHandler(proto) + } + for _, proto := range impl.supportedDealProtocols { + impl.host.RemoveStreamHandler(proto) + } + for _, proto := range impl.supportedDealStatusProtocols { + impl.host.RemoveStreamHandler(proto) + } + return nil +} + +func (impl *libp2pStorageMarketNetwork) handleNewAskStream(s network.Stream) { + reader := impl.getReaderOrReset(s) + if reader != nil { + var as StorageAskStream + if s.Protocol() == storagemarket.OldAskProtocolID { + as = &legacyAskStream{s.Conn().RemotePeer(), s, reader} + } else { + as = &askStream{s.Conn().RemotePeer(), s, reader} + } + impl.receiver.HandleAskStream(as) + } +} + +func (impl *libp2pStorageMarketNetwork) handleNewDealStream(s network.Stream) { + reader := impl.getReaderOrReset(s) + if reader != nil { + var ds StorageDealStream + switch s.Protocol() { + case storagemarket.DealProtocolID101: + ds = &dealStreamv101{s.Conn().RemotePeer(), impl.host, s, reader} + case storagemarket.DealProtocolID110: + ds = &dealStreamv110{s.Conn().RemotePeer(), impl.host, s, reader} + default: + ds = &dealStreamv111{s.Conn().RemotePeer(), impl.host, s, reader} + } + impl.receiver.HandleDealStream(ds) + } +} + +func (impl *libp2pStorageMarketNetwork) handleNewDealStatusStream(s network.Stream) { + reader := impl.getReaderOrReset(s) + if reader != nil { + var qs DealStatusStream + if s.Protocol() == 
storagemarket.OldDealStatusProtocolID { + qs = &legacyDealStatusStream{s.Conn().RemotePeer(), impl.host, s, reader} + } else { + qs = &dealStatusStream{s.Conn().RemotePeer(), impl.host, s, reader} + } + impl.receiver.HandleDealStatusStream(qs) + } +} + +func (impl *libp2pStorageMarketNetwork) getReaderOrReset(s network.Stream) *bufio.Reader { + if impl.receiver == nil { + log.Warn("no receiver set") + s.Reset() // nolint: errcheck,gosec + return nil + } + return bufio.NewReaderSize(s, 16) +} + +func (impl *libp2pStorageMarketNetwork) ID() peer.ID { + return impl.host.ID() +} + +func (impl *libp2pStorageMarketNetwork) AddAddrs(p peer.ID, addrs []ma.Multiaddr) { + impl.host.Peerstore().AddAddrs(p, addrs, 8*time.Hour) +} + +func (impl *libp2pStorageMarketNetwork) TagPeer(p peer.ID, id string) { + impl.host.ConnManager().TagPeer(p, id, TagPriority) +} + +func (impl *libp2pStorageMarketNetwork) UntagPeer(p peer.ID, id string) { + impl.host.ConnManager().UntagPeer(p, id) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/libp2p_impl_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/libp2p_impl_test.go new file mode 100644 index 00000000000..7f89d955a6a --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/libp2p_impl_test.go @@ -0,0 +1,696 @@ +package network_test + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" +) + +type testReceiver struct { + t *testing.T + dealStreamHandler func(network.StorageDealStream) + askStreamHandler func(network.StorageAskStream) + dealStatusStreamHandler 
func(stream network.DealStatusStream) +} + +var _ network.StorageReceiver = &testReceiver{} + +func (tr *testReceiver) HandleDealStream(s network.StorageDealStream) { + defer s.Close() + if tr.dealStreamHandler != nil { + tr.dealStreamHandler(s) + } +} + +func (tr *testReceiver) HandleAskStream(s network.StorageAskStream) { + defer s.Close() + if tr.askStreamHandler != nil { + tr.askStreamHandler(s) + } +} + +func (tr *testReceiver) HandleDealStatusStream(s network.DealStatusStream) { + defer s.Close() + if tr.dealStatusStreamHandler != nil { + tr.dealStatusStreamHandler(s) + } +} + +func TestOpenStreamWithRetries(t *testing.T) { + ctx := context.Background() + td := shared_testutil.NewLibp2pTestData(ctx, t) + + fromNetwork := network.NewFromLibp2pHost(td.Host1, network.RetryParameters(1*time.Second, 10*time.Second, 5, 5)) + toNetwork := network.NewFromLibp2pHost(td.Host2) + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets a receiver that will start after some time -> so we can verify exponential backoff kicks in + require.NoError(t, td.Host2.Close()) + achan := make(chan network.AskRequest) + tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { + readq, err := s.ReadAskRequest() + require.NoError(t, err) + achan <- readq + }} + + var err error + + go func() { + select { + case <-time.After(3 * time.Second): + err = toNetwork.SetDelegate(tr2) + case <-ctx.Done(): + return + } + }() + + // setup query stream host1 --> host 2 + assertAskRequestReceived(ctx, t, fromNetwork, toHost, achan) + assert.NoError(t, err) +} + +func TestAskStreamSendReceiveAskRequest(t *testing.T) { + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old 
queries": { + receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.StorageMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedAskProtocols([]protocol.ID{storagemarket.OldAskProtocolID})) + } else { + fromNetwork = network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedAskProtocols([]protocol.ID{storagemarket.OldAskProtocolID})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + achan := make(chan network.AskRequest) + tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { + readq, err := s.ReadAskRequest() + require.NoError(t, err) + achan <- readq + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + // setup query stream host1 --> host 2 + assertAskRequestReceived(ctx, t, fromNetwork, toHost, achan) + }) + } +} + +func TestAskStreamSendReceiveAskResponse(t *testing.T) { + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old queries": { + receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.StorageMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedAskProtocols([]protocol.ID{storagemarket.OldAskProtocolID})) + } else { + fromNetwork = 
network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedAskProtocols([]protocol.ID{storagemarket.OldAskProtocolID})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + achan := make(chan network.AskResponse) + tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { + a, _, err := s.ReadAskResponse() + require.NoError(t, err) + achan <- a + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + assertAskResponseReceived(ctx, t, fromNetwork, toHost, achan) + }) + } +} + +func TestAskStreamSendReceiveMultipleSuccessful(t *testing.T) { + // send query, read in handler, send response back, read response + ctxBg := context.Background() + td := shared_testutil.NewLibp2pTestData(ctxBg, t) + nw1 := network.NewFromLibp2pHost(td.Host1) + nw2 := network.NewFromLibp2pHost(td.Host2) + require.NoError(t, td.Host1.Connect(ctxBg, peer.AddrInfo{ID: td.Host2.ID()})) + + // host2 gets a query and sends a response + ar := shared_testutil.MakeTestStorageAskResponse() + done := make(chan bool) + var resigningFunc network.ResigningFunc = func(ctx context.Context, data interface{}) (*crypto.Signature, error) { + return nil, nil + } + tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { + _, err := s.ReadAskRequest() + require.NoError(t, err) + + require.NoError(t, s.WriteAskResponse(ar, resigningFunc)) + done <- true + }} + require.NoError(t, nw2.SetDelegate(tr2)) + + ctx, cancel := context.WithTimeout(ctxBg, 10*time.Second) + defer cancel() + + qs, err := nw1.NewAskStream(ctx, td.Host2.ID()) + require.NoError(t, err) + + var resp network.AskResponse + go require.NoError(t, qs.WriteAskRequest(shared_testutil.MakeTestStorageAskRequest())) + resp, _, err = qs.ReadAskResponse() + 
require.NoError(t, err) + + select { + case <-ctx.Done(): + t.Error("response not received") + case <-done: + } + + assert.Equal(t, ar, resp) +} + +func TestDealStreamSendReceiveDealProposal(t *testing.T) { + // send proposal, read in handler + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old queries": { + receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.StorageMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedDealProtocols([]protocol.ID{storagemarket.DealProtocolID110})) + } else { + fromNetwork = network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedDealProtocols([]protocol.ID{storagemarket.DealProtocolID110})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toHost := td.Host2.ID() + + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + dchan := make(chan network.Proposal) + tr2 := &testReceiver{ + t: t, + dealStreamHandler: func(s network.StorageDealStream) { + readD, err := s.ReadDealProposal() + require.NoError(t, err) + dchan <- readD + }, + } + require.NoError(t, toNetwork.SetDelegate(tr2)) + + assertDealProposalReceived(ctx, t, fromNetwork, toHost, dchan) + }) + } +} + +func TestDealStreamSendReceiveDealResponse(t *testing.T) { + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old queries": { + 
receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.StorageMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedDealProtocols([]protocol.ID{storagemarket.DealProtocolID110})) + } else { + fromNetwork = network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedDealProtocols([]protocol.ID{storagemarket.DealProtocolID110})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toPeer := td.Host2.ID() + + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + drChan := make(chan network.SignedResponse) + tr2 := &testReceiver{ + t: t, + dealStreamHandler: func(s network.StorageDealStream) { + readDP, _, err := s.ReadDealResponse() + require.NoError(t, err) + drChan <- readDP + }, + } + require.NoError(t, toNetwork.SetDelegate(tr2)) + assertDealResponseReceived(ctx, t, fromNetwork, toPeer, drChan) + }) + } +} + +func TestDealStreamSendReceiveMultipleSuccessful(t *testing.T) { + // send proposal, read in handler, send response back, + // read response, + + bgCtx := context.Background() + td := shared_testutil.NewLibp2pTestData(bgCtx, t) + fromNetwork := network.NewFromLibp2pHost(td.Host1) + toNetwork := network.NewFromLibp2pHost(td.Host2) + toPeer := td.Host2.ID() + + // set up stream handler, channels, and response + dr := shared_testutil.MakeTestStorageNetworkSignedResponse() + done := make(chan bool) + var resigningFunc network.ResigningFunc = func(ctx context.Context, data interface{}) (*crypto.Signature, error) { + return nil, nil + } + tr2 := &testReceiver{t: t, dealStreamHandler: func(s network.StorageDealStream) { + _, err := s.ReadDealProposal() + require.NoError(t, err) + + require.NoError(t, s.WriteDealResponse(dr, resigningFunc)) + 
done <- true + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + ctx, cancel := context.WithTimeout(bgCtx, 10*time.Second) + defer cancel() + + // start sending deal proposal + ds1, err := fromNetwork.NewDealStream(ctx, toPeer) + require.NoError(t, err) + + dp := shared_testutil.MakeTestStorageNetworkProposal() + + // write proposal + require.NoError(t, ds1.WriteDealProposal(dp)) + + // read response and verify it's the one we told toNetwork to send + responseReceived, _, err := ds1.ReadDealResponse() + require.NoError(t, err) + assert.Equal(t, dr, responseReceived) + + select { + case <-ctx.Done(): + t.Errorf("failed to receive messages") + case <-done: + } +} + +func TestDealStatusStreamSendReceiveRequest(t *testing.T) { + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old queries": { + receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.StorageMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedDealStatusProtocols([]protocol.ID{storagemarket.OldDealStatusProtocolID})) + } else { + fromNetwork = network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedDealStatusProtocols([]protocol.ID{storagemarket.OldDealStatusProtocolID})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + achan := make(chan network.DealStatusRequest) + tr2 := &testReceiver{t: t, dealStatusStreamHandler: func(s 
network.DealStatusStream) { + readq, err := s.ReadDealStatusRequest() + require.NoError(t, err) + achan <- readq + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + // setup query stream host1 --> host 2 + assertDealStatusRequestReceived(ctx, t, fromNetwork, toHost, achan) + }) + } +} + +func TestDealStatusStreamSendReceiveResponse(t *testing.T) { + ctx := context.Background() + + testCases := map[string]struct { + senderDisabledNew bool + receiverDisabledNew bool + }{ + "both clients current version": {}, + "sender old supports old queries": { + senderDisabledNew: true, + }, + "receiver only supports old queries": { + receiverDisabledNew: true, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + td := shared_testutil.NewLibp2pTestData(ctx, t) + var fromNetwork, toNetwork network.StorageMarketNetwork + if data.senderDisabledNew { + fromNetwork = network.NewFromLibp2pHost(td.Host1, network.SupportedDealStatusProtocols([]protocol.ID{storagemarket.OldDealStatusProtocolID})) + } else { + fromNetwork = network.NewFromLibp2pHost(td.Host1) + } + if data.receiverDisabledNew { + toNetwork = network.NewFromLibp2pHost(td.Host2, network.SupportedDealStatusProtocols([]protocol.ID{storagemarket.OldDealStatusProtocolID})) + } else { + toNetwork = network.NewFromLibp2pHost(td.Host2) + } + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + achan := make(chan network.DealStatusResponse) + tr2 := &testReceiver{t: t, dealStatusStreamHandler: func(s network.DealStatusStream) { + a, _, err := s.ReadDealStatusResponse() + require.NoError(t, err) + achan <- a + }} + require.NoError(t, toNetwork.SetDelegate(tr2)) + + assertDealStatusResponseReceived(ctx, t, fromNetwork, toHost, achan) + }) + } +} + +func TestDealStatusStreamSendReceiveMultipleSuccessful(t *testing.T) { + // send query, read in handler, send response back, read 
response + ctxBg := context.Background() + td := shared_testutil.NewLibp2pTestData(ctxBg, t) + nw1 := network.NewFromLibp2pHost(td.Host1) + nw2 := network.NewFromLibp2pHost(td.Host2) + require.NoError(t, td.Host1.Connect(ctxBg, peer.AddrInfo{ID: td.Host2.ID()})) + + // host2 gets a query and sends a response + ar := shared_testutil.MakeTestDealStatusResponse() + done := make(chan bool) + var resigningFunc network.ResigningFunc = func(ctx context.Context, data interface{}) (*crypto.Signature, error) { + return nil, nil + } + tr2 := &testReceiver{t: t, dealStatusStreamHandler: func(s network.DealStatusStream) { + _, err := s.ReadDealStatusRequest() + require.NoError(t, err) + + require.NoError(t, s.WriteDealStatusResponse(ar, resigningFunc)) + done <- true + }} + require.NoError(t, nw2.SetDelegate(tr2)) + + ctx, cancel := context.WithTimeout(ctxBg, 10*time.Second) + defer cancel() + + qs, err := nw1.NewDealStatusStream(ctx, td.Host2.ID()) + require.NoError(t, err) + + var resp network.DealStatusResponse + go require.NoError(t, qs.WriteDealStatusRequest(shared_testutil.MakeTestDealStatusRequest())) + resp, _, err = qs.ReadDealStatusResponse() + require.NoError(t, err) + + select { + case <-ctx.Done(): + t.Error("response not received") + case <-done: + } + + assert.Equal(t, ar, resp) +} + +func TestLibp2pStorageMarketNetwork_StopHandlingRequests(t *testing.T) { + bgCtx := context.Background() + td := shared_testutil.NewLibp2pTestData(bgCtx, t) + + fromNetwork := network.NewFromLibp2pHost(td.Host1, network.RetryParameters(0, 0, 0, 0)) + toNetwork := network.NewFromLibp2pHost(td.Host2) + toHost := td.Host2.ID() + + // host1 gets no-op receiver + tr := &testReceiver{t: t} + require.NoError(t, fromNetwork.SetDelegate(tr)) + + // host2 gets receiver + achan := make(chan network.AskRequest) + tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { + readar, err := s.ReadAskRequest() + require.NoError(t, err) + achan <- readar + }} + 
require.NoError(t, toNetwork.SetDelegate(tr2)) + + require.NoError(t, toNetwork.StopHandlingRequests()) + + _, err := fromNetwork.NewAskStream(bgCtx, toHost) + require.Error(t, err, "protocol not supported") +} + +// assertDealProposalReceived performs the verification that a deal proposal is received +func assertDealProposalReceived(inCtx context.Context, t *testing.T, fromNetwork network.StorageMarketNetwork, toPeer peer.ID, inChan chan network.Proposal) { + ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) + defer cancel() + + qs1, err := fromNetwork.NewDealStream(ctx, toPeer) + require.NoError(t, err) + + // send query to host2 + dp := shared_testutil.MakeTestStorageNetworkProposal() + require.NoError(t, qs1.WriteDealProposal(dp)) + + var dealReceived network.Proposal + select { + case <-ctx.Done(): + t.Error("deal proposal not received") + case dealReceived = <-inChan: + } + require.NotNil(t, dealReceived) + assert.Equal(t, dp, dealReceived) +} + +func assertDealResponseReceived(parentCtx context.Context, t *testing.T, fromNetwork network.StorageMarketNetwork, toPeer peer.ID, inChan chan network.SignedResponse) { + ctx, cancel := context.WithTimeout(parentCtx, 10*time.Second) + defer cancel() + + ds1, err := fromNetwork.NewDealStream(ctx, toPeer) + require.NoError(t, err) + + dr := shared_testutil.MakeTestStorageNetworkSignedResponse() + var resigningFunc network.ResigningFunc = func(ctx context.Context, data interface{}) (*crypto.Signature, error) { + return shared_testutil.MakeTestSignature(), nil + } + require.NoError(t, ds1.WriteDealResponse(dr, resigningFunc)) + + var responseReceived network.SignedResponse + select { + case <-ctx.Done(): + t.Error("response not received") + case responseReceived = <-inChan: + } + require.NotNil(t, responseReceived) + assert.Equal(t, dr.Response, responseReceived.Response) +} + +// assertAskRequestReceived performs the verification that a AskRequest is received +func assertAskRequestReceived(inCtx context.Context, 
t *testing.T, fromNetwork network.StorageMarketNetwork, toHost peer.ID, achan chan network.AskRequest) { + ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) + defer cancel() + + as1, err := fromNetwork.NewAskStream(ctx, toHost) + require.NoError(t, err) + + // send query to host2 + a := shared_testutil.MakeTestStorageAskRequest() + require.NoError(t, as1.WriteAskRequest(a)) + + var ina network.AskRequest + select { + case <-ctx.Done(): + t.Error("msg not received") + case ina = <-achan: + } + require.NotNil(t, ina) + assert.Equal(t, a.Miner, ina.Miner) +} + +// assertAskResponseReceived performs the verification that a AskResponse is received +func assertAskResponseReceived(inCtx context.Context, t *testing.T, + fromNetwork network.StorageMarketNetwork, + toHost peer.ID, + achan chan network.AskResponse) { + ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) + defer cancel() + + // setup query stream host1 --> host 2 + as1, err := fromNetwork.NewAskStream(ctx, toHost) + require.NoError(t, err) + + // send queryresponse to host2 + ar := shared_testutil.MakeTestStorageAskResponse() + var resigningFunc network.ResigningFunc = func(ctx context.Context, data interface{}) (*crypto.Signature, error) { + return shared_testutil.MakeTestSignature(), nil + } + require.NoError(t, as1.WriteAskResponse(ar, resigningFunc)) + + // read queryresponse + var inar network.AskResponse + select { + case <-ctx.Done(): + t.Error("msg not received") + case inar = <-achan: + } + + require.NotNil(t, inar) + assert.Equal(t, ar.Ask.Ask, inar.Ask.Ask) +} + +// assertDealStatusRequestReceived performs the verification that a DealStatusRequest is received +func assertDealStatusRequestReceived(inCtx context.Context, t *testing.T, fromNetwork network.StorageMarketNetwork, toHost peer.ID, achan chan network.DealStatusRequest) { + ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) + defer cancel() + + as1, err := fromNetwork.NewDealStatusStream(ctx, toHost) + require.NoError(t, 
err) + + // send query to host2 + a := shared_testutil.MakeTestDealStatusRequest() + require.NoError(t, as1.WriteDealStatusRequest(a)) + + var ina network.DealStatusRequest + select { + case <-ctx.Done(): + t.Error("msg not received") + case ina = <-achan: + } + require.NotNil(t, ina) + assert.Equal(t, a, ina) +} + +// assertDealStatusResponseReceived performs the verification that a QueryResponse is received +func assertDealStatusResponseReceived(inCtx context.Context, t *testing.T, + fromNetwork network.StorageMarketNetwork, + toHost peer.ID, + achan chan network.DealStatusResponse) { + ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) + defer cancel() + + // setup query stream host1 --> host 2 + as1, err := fromNetwork.NewDealStatusStream(ctx, toHost) + require.NoError(t, err) + + // send queryresponse to host2 + ar := shared_testutil.MakeTestDealStatusResponse() + var resigningFunc network.ResigningFunc = func(ctx context.Context, data interface{}) (*crypto.Signature, error) { + return shared_testutil.MakeTestSignature(), nil + } + require.NoError(t, as1.WriteDealStatusResponse(ar, resigningFunc)) + + // read queryresponse + var inar network.DealStatusResponse + select { + case <-ctx.Done(): + t.Error("msg not received") + case inar = <-achan: + } + + require.NotNil(t, inar) + assert.Equal(t, ar.DealState, inar.DealState) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/network.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/network.go new file mode 100644 index 00000000000..83e506b8799 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/network.go @@ -0,0 +1,74 @@ +package network + +import ( + "context" + + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/go-state-types/crypto" +) + +// ResigningFunc allows you to resign data as needed when downgrading a response +type ResigningFunc func(ctx context.Context, data 
interface{}) (*crypto.Signature, error) + +// These are the required interfaces that must be implemented to send and receive data +// for storage deals. + +// StorageAskStream is a stream for reading/writing requests & +// responses on the Storage Ask protocol +type StorageAskStream interface { + ReadAskRequest() (AskRequest, error) + WriteAskRequest(AskRequest) error + ReadAskResponse() (AskResponse, []byte, error) + WriteAskResponse(AskResponse, ResigningFunc) error + Close() error +} + +// StorageDealStream is a stream for reading and writing requests +// and responses on the storage deal protocol +type StorageDealStream interface { + ReadDealProposal() (Proposal, error) + WriteDealProposal(Proposal) error + ReadDealResponse() (SignedResponse, []byte, error) + WriteDealResponse(SignedResponse, ResigningFunc) error + RemotePeer() peer.ID + Close() error +} + +// DealStatusStream is a stream for reading and writing requests +// and responses on the deal status protocol +type DealStatusStream interface { + ReadDealStatusRequest() (DealStatusRequest, error) + WriteDealStatusRequest(DealStatusRequest) error + ReadDealStatusResponse() (DealStatusResponse, []byte, error) + WriteDealStatusResponse(DealStatusResponse, ResigningFunc) error + Close() error +} + +// StorageReceiver implements functions for receiving +// incoming data on storage protocols +type StorageReceiver interface { + HandleAskStream(StorageAskStream) + HandleDealStream(StorageDealStream) + HandleDealStatusStream(DealStatusStream) +} + +// StorageMarketNetwork is a network abstraction for the storage market +type StorageMarketNetwork interface { + NewAskStream(context.Context, peer.ID) (StorageAskStream, error) + NewDealStream(context.Context, peer.ID) (StorageDealStream, error) + NewDealStatusStream(context.Context, peer.ID) (DealStatusStream, error) + SetDelegate(StorageReceiver) error + StopHandlingRequests() error + ID() peer.ID + AddAddrs(peer.ID, []ma.Multiaddr) + + PeerTagger +} + +// PeerTagger 
implements arbitrary tagging of peers +type PeerTagger interface { + TagPeer(peer.ID, string) + UntagPeer(peer.ID, string) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/types.go new file mode 100644 index 00000000000..36f9f07143b --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/types.go @@ -0,0 +1,81 @@ +package network + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +//go:generate cbor-gen-for --map-encoding AskRequest AskResponse Proposal Response SignedResponse DealStatusRequest DealStatusResponse + +// Proposal is the data sent over the network from client to provider when proposing +// a deal +type Proposal struct { + DealProposal *market.ClientDealProposal + Piece *storagemarket.DataRef + FastRetrieval bool +} + +// ProposalUndefined is an empty Proposal message +var ProposalUndefined = Proposal{} + +// Response is a response to a proposal sent over the network +type Response struct { + State storagemarket.StorageDealStatus + + // DealProposalRejected + Message string + Proposal cid.Cid + + // StorageDealProposalAccepted + PublishMessage *cid.Cid +} + +// SignedResponse is a response that is signed +type SignedResponse struct { + Response Response + + Signature *crypto.Signature +} + +// SignedResponseUndefined represents an empty SignedResponse message +var SignedResponseUndefined = SignedResponse{} + +// AskRequest is a request for current ask parameters for a given miner +type AskRequest struct { + Miner address.Address +} + +// AskRequestUndefined represents and empty AskRequest message +var AskRequestUndefined = AskRequest{} + +// AskResponse is the response sent over the network in response +// to an ask 
request +type AskResponse struct { + Ask *storagemarket.SignedStorageAsk +} + +// AskResponseUndefined represents an empty AskResponse message +var AskResponseUndefined = AskResponse{} + +// DealStatusRequest sent by a client to query deal status +type DealStatusRequest struct { + Proposal cid.Cid + Signature crypto.Signature +} + +// DealStatusRequestUndefined represents an empty DealStatusRequest message +var DealStatusRequestUndefined = DealStatusRequest{} + +// DealStatusResponse is a provider's response to DealStatusRequest +type DealStatusResponse struct { + DealState storagemarket.ProviderDealState + Signature crypto.Signature +} + +// DealStatusResponseUndefined represents an empty DealStatusResponse message +var DealStatusResponseUndefined = DealStatusResponse{} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/types_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/types_cbor_gen.go new file mode 100644 index 00000000000..dc5bd177736 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/network/types_cbor_gen.go @@ -0,0 +1,927 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package network + +import ( + "fmt" + "io" + "math" + "sort" + + storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *AskRequest) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskRequest) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskRequest{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("AskRequest: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Miner (address.Address) (struct) + case "Miner": + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so 
ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *AskResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Ask (storagemarket.SignedStorageAsk) (struct) + if len("Ask") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ask")); err != nil { + return err + } + + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *AskResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = AskResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("AskResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ask (storagemarket.SignedStorageAsk) (struct) + case "Ask": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(storagemarket.SignedStorageAsk) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Proposal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := 
w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{163}); err != nil { + return err + } + + // t.DealProposal (market.ClientDealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.Piece (storagemarket.DataRef) (struct) + if len("Piece") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Piece\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Piece"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Piece")); err != nil { + return err + } + + if err := t.Piece.MarshalCBOR(cw); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *Proposal) UnmarshalCBOR(r io.Reader) (err error) { + *t = Proposal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Proposal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + 
sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (market.ClientDealProposal) (struct) + case "DealProposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.ClientDealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.Piece (storagemarket.DataRef) (struct) + case "Piece": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Piece = new(storagemarket.DataRef) + if err := t.Piece.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Piece pointer: %w", err) + } + } + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Response) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{164}); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != 
nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Proposal")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.PublishMessage (cid.Cid) (struct) + if len("PublishMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishMessage\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishMessage")); err != nil { + return err + } + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + return nil +} + +func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { + *t = Response{} + + cr := 
cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Response: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (cid.Cid) (struct) + case "Proposal": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.PublishMessage (cid.Cid) (struct) + case "PublishMessage": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *SignedResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Response (network.Response) (struct) + if len("Response") > cbg.MaxLength { + 
return xerrors.Errorf("Value in field \"Response\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Response"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Response")); err != nil { + return err + } + + if err := t.Response.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SignedResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Response (network.Response) (struct) + case "Response": + + { + + if err := t.Response.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Response: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling 
t.Signature pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStatusRequest) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Proposal (cid.Cid) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Proposal")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusRequest) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusRequest{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStatusRequest: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name 
{ + // t.Proposal (cid.Cid) (struct) + case "Proposal": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStatusResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.DealState (storagemarket.ProviderDealState) (struct) + if len("DealState") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealState\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealState"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealState")); err != nil { + return err + } + + if err := t.DealState.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *DealStatusResponse) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStatusResponse{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return 
fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStatusResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealState (storagemarket.ProviderDealState) (struct) + case "DealState": + + { + + if err := t.DealState.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealState: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/nodes.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/nodes.go new file mode 100644 index 00000000000..67996b37fba --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/nodes.go @@ -0,0 +1,129 @@ +package storagemarket + +import ( + "context" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +// DealSectorPreCommittedCallback is a callback that runs when a sector is pre-committed +// sectorNumber: the number of the sector that the deal is in +// isActive: the deal is already active +type DealSectorPreCommittedCallback func(sectorNumber abi.SectorNumber, isActive bool, err error) + +// DealSectorCommittedCallback is a 
callback that runs when a sector is committed +type DealSectorCommittedCallback func(err error) + +// DealExpiredCallback is a callback that runs when a deal expires +type DealExpiredCallback func(err error) + +// DealSlashedCallback is a callback that runs when a deal gets slashed +type DealSlashedCallback func(slashEpoch abi.ChainEpoch, err error) + +// StorageCommon are common interfaces provided by a filecoin Node to both StorageClient and StorageProvider +type StorageCommon interface { + + // GetChainHead returns a tipset token for the current chain head + GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) + + // Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. + AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) + + // ReserveFunds reserves the given amount of funds is ensures it is available for the deal + ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) + + // ReleaseFunds releases funds reserved with ReserveFunds + ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error + + // GetBalance returns locked/unlocked for a storage participant. Used by both providers and clients. + GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (Balance, error) + + // VerifySignature verifies a given set of data was signed properly by a given address's private key + VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, plaintext []byte, tok shared.TipSetToken) (bool, error) + + // WaitForMessage waits until a message appears on chain. 
If it is already on chain, the callback is called immediately + WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, cid.Cid, error) error) error + + // SignsBytes signs the given data with the given address's private key + SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) + + // DealProviderCollateralBounds returns the min and max collateral a storage provider can issue. + DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) + + // OnDealSectorPreCommitted waits for a deal's sector to be pre-committed + OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid, cb DealSectorPreCommittedCallback) error + + // OnDealSectorCommitted waits for a deal's sector to be sealed and proved, indicating the deal is active + OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid *cid.Cid, cb DealSectorCommittedCallback) error + + // OnDealExpiredOrSlashed registers callbacks to be called when the deal expires or is slashed + OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired DealExpiredCallback, onDealSlashed DealSlashedCallback) error +} + +// PackingResult returns information about how a deal was put into a sector +type PackingResult struct { + SectorNumber abi.SectorNumber + Offset abi.PaddedPieceSize + Size abi.PaddedPieceSize +} + +// PublishDealsWaitResult is the result of a call to wait for publish deals to +// appear on chain +type PublishDealsWaitResult struct { + DealID abi.DealID + FinalCid cid.Cid +} + +// StorageProviderNode are node dependencies for a StorageProvider +type StorageProviderNode interface { + StorageCommon + + // PublishDeals publishes a deal on chain, returns the message cid, 
but does not wait for message to appear + PublishDeals(ctx context.Context, deal MinerDeal) (cid.Cid, error) + + // WaitForPublishDeals waits for a deal publish message to land on chain. + WaitForPublishDeals(ctx context.Context, mcid cid.Cid, proposal market.DealProposal) (*PublishDealsWaitResult, error) + + // OnDealComplete is called when a deal is complete and on chain, and data has been transferred and is ready to be added to a sector + OnDealComplete(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader shared.ReadSeekStarter) (*PackingResult, error) + + OnDealCompleteOfSxx(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader shared.ReadSeekStarter) (*PackingResult, error) + + // GetMinerWorkerAddress returns the worker address associated with a miner + GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken) (address.Address, error) + + // GetDataCap gets the current data cap for addr + GetDataCap(ctx context.Context, addr address.Address, tok shared.TipSetToken) (*verifreg.DataCap, error) + + // GetProofType gets the current seal proof type for the given miner. 
+ GetProofType(ctx context.Context, addr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) +} + +// StorageClientNode are node dependencies for a StorageClient +type StorageClientNode interface { + StorageCommon + + // GetStorageProviders returns information about known miners + ListStorageProviders(ctx context.Context, tok shared.TipSetToken) ([]*StorageProviderInfo, error) + + // ValidatePublishedDeal verifies a deal is published on chain and returns the dealID + ValidatePublishedDeal(ctx context.Context, deal ClientDeal) (abi.DealID, error) + + // SignProposal signs a DealProposal + SignProposal(ctx context.Context, signer address.Address, proposal market.DealProposal) (*market.ClientDealProposal, error) + + // GetDefaultWalletAddress returns the address for this client + GetDefaultWalletAddress(ctx context.Context) (address.Address, error) + + // GetMinerInfo returns info for a single miner with the given address + GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (*StorageProviderInfo, error) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/provider.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/provider.go new file mode 100644 index 00000000000..38b51536c4c --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/provider.go @@ -0,0 +1,72 @@ +package storagemarket + +import ( + "context" + "io" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +// ProviderSubscriber is a callback that is run when events are emitted on a StorageProvider +type ProviderSubscriber func(event ProviderEvent, deal MinerDeal) + +// StorageProvider provides an interface to the storage market for a single +// storage miner. +type StorageProvider interface { + + // Start initializes deal processing on a StorageProvider and restarts in progress deals. 
+ // It also registers the provider with a StorageMarketNetwork so it can receive incoming + // messages on the storage market's libp2p protocols + Start(ctx context.Context) error + + // OnReady registers a listener for when the provider comes on line + OnReady(shared.ReadyFunc) + + // Stop terminates processing of deals on a StorageProvider + Stop() error + + // SetAsk configures the storage miner's ask with the provided prices (for unverified and verified deals), + // duration, and options. Any previously-existing ask is replaced. + SetAsk(price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, options ...StorageAskOption) error + + // GetAsk returns the storage miner's ask, or nil if one does not exist. + GetAsk() *SignedStorageAsk + + // GetLocalDeal gets a deal by signed proposal cid + GetLocalDeal(cid cid.Cid) (MinerDeal, error) + + // LocalDealCount gets the number of local deals + LocalDealCount() (int, error) + + // ListLocalDeals lists deals processed by this storage provider + ListLocalDeals() ([]MinerDeal, error) + + // ListLocalDealsPage lists deals by creation time descending, starting + // at the deal with the given signed proposal cid, skipping offset deals + // and returning up to limit deals + ListLocalDealsPage(startPropCid *cid.Cid, offset int, limit int) ([]MinerDeal, error) + + // AddStorageCollateral adds storage collateral + AddStorageCollateral(ctx context.Context, amount abi.TokenAmount) error + + // GetStorageCollateral returns the current collateral balance + GetStorageCollateral(ctx context.Context) (Balance, error) + + // ImportDataForDeal manually imports data for an offline storage deal + ImportDataForDeal(ctx context.Context, propCid cid.Cid, data io.Reader) error + + ImportDataForDealOfSxx(ctx context.Context, propCid cid.Cid, path string, worker string) error + + // SubscribeToEvents listens for events that happen related to storage deals on a provider + SubscribeToEvents(subscriber ProviderSubscriber) 
shared.Unsubscribe + + RetryDealPublishing(propCid cid.Cid) error + + AnnounceDealToIndexer(ctx context.Context, proposalCid cid.Cid) error + + AnnounceAllDealsToIndexer(ctx context.Context) error +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testharness/dependencies/dependencies.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testharness/dependencies/dependencies.go new file mode 100644 index 00000000000..8497b209cbf --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testharness/dependencies/dependencies.go @@ -0,0 +1,201 @@ +package dependencies + +import ( + "bytes" + "context" + "io/ioutil" + "math/rand" + "os" + "testing" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + graphsyncimpl "github.com/ipfs/go-graphsync/impl" + "github.com/ipfs/go-graphsync/network" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + dtimpl "github.com/filecoin-project/go-data-transfer/impl" + network2 "github.com/filecoin-project/go-data-transfer/network" + "github.com/filecoin-project/go-data-transfer/testutil" + dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + + discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" + "github.com/filecoin-project/go-fil-markets/filestore" + "github.com/filecoin-project/go-fil-markets/piecestore" + piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl" + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" + 
"github.com/filecoin-project/go-fil-markets/stores" +) + +// StorageDependencies are the dependencies required to initialize a storage client/provider +type StorageDependencies struct { + Ctx context.Context + Epoch abi.ChainEpoch + ProviderAddr address.Address + ClientAddr address.Address + ClientNode *testnodes.FakeClientNode + ProviderNode *testnodes.FakeProviderNode + SMState *testnodes.StorageMarketState + TempFilePath string + ProviderInfo storagemarket.StorageProviderInfo + TestData *shared_testutil.Libp2pTestData + PieceStore piecestore.PieceStore + DagStore stores.DAGStoreWrapper + DTClient datatransfer.Manager + DTProvider datatransfer.Manager + PeerResolver *discoveryimpl.Local + ClientDelayFakeCommonNode testnodes.DelayFakeCommonNode + ProviderClientDelayFakeCommonNode testnodes.DelayFakeCommonNode + Fs filestore.FileStore + StoredAsk *storedask.StoredAsk +} + +func NewDependenciesWithTestData(t *testing.T, + ctx context.Context, + td *shared_testutil.Libp2pTestData, + smState *testnodes.StorageMarketState, + tempPath string, + cd testnodes.DelayFakeCommonNode, + pd testnodes.DelayFakeCommonNode, +) *StorageDependencies { + return NewDepGenerator().New(t, ctx, td, smState, tempPath, cd, pd) +} + +type NewDataTransfer func(ds datastore.Batching, cidListsDir string, dataTransferNetwork network2.DataTransferNetwork, transport datatransfer.Transport) (datatransfer.Manager, error) + +func defaultNewDataTransfer(ds datastore.Batching, dir string, transferNetwork network2.DataTransferNetwork, transport datatransfer.Transport) (datatransfer.Manager, error) { + return dtimpl.NewDataTransfer(ds, transferNetwork, transport) +} + +type DepGenerator struct { + ClientNewDataTransfer NewDataTransfer + ProviderNewDataTransfer NewDataTransfer +} + +func NewDepGenerator() *DepGenerator { + return &DepGenerator{ + ClientNewDataTransfer: defaultNewDataTransfer, + ProviderNewDataTransfer: defaultNewDataTransfer, + } +} + +func (gen *DepGenerator) New( + t *testing.T, + ctx 
context.Context, + td *shared_testutil.Libp2pTestData, + smState *testnodes.StorageMarketState, + tempPath string, + cd testnodes.DelayFakeCommonNode, + pd testnodes.DelayFakeCommonNode, +) *StorageDependencies { + cd.OnDealSectorCommittedChan = make(chan struct{}) + cd.OnDealExpiredOrSlashedChan = make(chan struct{}) + + pd.OnDealSectorCommittedChan = make(chan struct{}) + pd.OnDealExpiredOrSlashedChan = make(chan struct{}) + + epoch := abi.ChainEpoch(100) + + clientNode := testnodes.FakeClientNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + SMState: smState, + DealFunds: shared_testutil.NewTestDealFunds(), + DelayFakeCommonNode: cd}, + ClientAddr: address.TestAddress, + ExpectedMinerInfos: []address.Address{address.TestAddress2}, + } + + expDealID := abi.DealID(rand.Uint64()) + psdReturn := market.PublishStorageDealsReturn{IDs: []abi.DealID{expDealID}} + psdReturnBytes := bytes.NewBuffer([]byte{}) + err := psdReturn.MarshalCBOR(psdReturnBytes) + assert.NoError(t, err) + + providerAddr := address.TestAddress2 + + if len(tempPath) == 0 { + tempPath, err = ioutil.TempDir("", "storagemarket_test") + assert.NoError(t, err) + t.Cleanup(func() { _ = os.RemoveAll(tempPath) }) + } + + ps, err := piecestoreimpl.NewPieceStore(td.Ds2) + require.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, ps) + + providerNode := &testnodes.FakeProviderNode{ + FakeCommonNode: testnodes.FakeCommonNode{ + DelayFakeCommonNode: pd, + SMState: smState, + DealFunds: shared_testutil.NewTestDealFunds(), + WaitForMessageRetBytes: psdReturnBytes.Bytes(), + }, + MinerAddr: providerAddr, + } + fs, err := filestore.NewLocalFileStore(filestore.OsPath(tempPath)) + assert.NoError(t, err) + + dagStore := shared_testutil.NewMockDagStoreWrapper(nil, nil) + + // create provider and client + + gs1 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(td.Host1), td.LinkSystem1) + dtTransport1 := dtgstransport.NewTransport(td.Host1.ID(), gs1) + dt1, err := gen.ClientNewDataTransfer(td.DTStore1, 
td.DTTmpDir1, td.DTNet1, dtTransport1) + require.NoError(t, err) + testutil.StartAndWaitForReady(ctx, t, dt1) + + discovery, err := discoveryimpl.NewLocal(namespace.Wrap(td.Ds1, datastore.NewKey("/deals/local"))) + require.NoError(t, err) + shared_testutil.StartAndWaitForReady(ctx, t, discovery) + + gs2 := graphsyncimpl.New(ctx, network.NewFromLibp2pHost(td.Host2), td.LinkSystem2) + dtTransport2 := dtgstransport.NewTransport(td.Host2.ID(), gs2) + dt2, err := gen.ProviderNewDataTransfer(td.DTStore2, td.DTTmpDir2, td.DTNet2, dtTransport2) + require.NoError(t, err) + testutil.StartAndWaitForReady(ctx, t, dt2) + + storedAskDs := namespace.Wrap(td.Ds2, datastore.NewKey("/storage/ask")) + storedAsk, err := storedask.NewStoredAsk(storedAskDs, datastore.NewKey("latest-ask"), providerNode, providerAddr) + assert.NoError(t, err) + + // Closely follows the MinerInfo struct in the spec + providerInfo := storagemarket.StorageProviderInfo{ + Address: providerAddr, + Owner: providerAddr, + Worker: providerAddr, + SectorSize: 1 << 20, + PeerID: td.Host2.ID(), + } + + smState.Providers = map[address.Address]*storagemarket.StorageProviderInfo{providerAddr: &providerInfo} + return &StorageDependencies{ + Ctx: ctx, + Epoch: epoch, + ClientAddr: clientNode.ClientAddr, + ProviderAddr: providerAddr, + ClientNode: &clientNode, + ProviderNode: providerNode, + ProviderInfo: providerInfo, + TestData: td, + SMState: smState, + TempFilePath: tempPath, + ClientDelayFakeCommonNode: cd, + ProviderClientDelayFakeCommonNode: pd, + DagStore: dagStore, + DTClient: dt1, + DTProvider: dt2, + PeerResolver: discovery, + PieceStore: ps, + Fs: fs, + StoredAsk: storedAsk, + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testharness/testharness.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testharness/testharness.go new file mode 100644 index 00000000000..7c8ffcf0bfb --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testharness/testharness.go @@ -0,0 
+1,207 @@ +package testharness + +import ( + "os" + "path/filepath" + "sync" + "testing" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + graphsyncimpl "github.com/ipfs/go-graphsync/impl" + gsnetwork "github.com/ipfs/go-graphsync/network" + bstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/libp2p/go-libp2p-core/protocol" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + + dtimpl "github.com/filecoin-project/go-data-transfer/impl" + "github.com/filecoin-project/go-data-transfer/testutil" + dtgstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" + storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" + "github.com/filecoin-project/go-fil-markets/storagemarket/testharness/dependencies" + "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" + "github.com/filecoin-project/go-fil-markets/stores" +) + +type StorageHarness struct { + *dependencies.StorageDependencies + PayloadCid cid.Cid + Client storagemarket.StorageClient + Provider storagemarket.StorageProvider + Data bstore.Blockstore + ReferenceProvider *shared_testutil.MockIndexProvider +} + +func NewHarness(t *testing.T, ctx context.Context, useStore bool, cd testnodes.DelayFakeCommonNode, pd testnodes.DelayFakeCommonNode, + disableNewDeals bool, fName ...string) *StorageHarness { + smState := testnodes.NewStorageMarketState() + td := shared_testutil.NewLibp2pTestData(ctx, t) + deps := 
dependencies.NewDependenciesWithTestData(t, ctx, td, smState, "", cd, pd) + + return NewHarnessWithTestData(t, td, deps, useStore, disableNewDeals, fName...) +} + +type MeshCreatorStub struct { +} + +func (m *MeshCreatorStub) Connect(context.Context) error { + return nil +} + +func NewHarnessWithTestData(t *testing.T, td *shared_testutil.Libp2pTestData, deps *dependencies.StorageDependencies, useStore bool, disableNewDeals bool, files ...string) *StorageHarness { + var file string + if len(files) == 0 { + file = "payload.txt" + } else { + file = files[0] + } + + fPath := filepath.Join(shared_testutil.ThisDir(t), "../fixtures/"+file) + + var rootLink ipld.Link + var path string + // TODO Both functions here should return the root cid of the UnixFSDag and the carv2 file path. + if useStore { + rootLink, path = td.LoadUnixFSFileToStore(t, fPath) + } else { + rootLink, path = td.LoadUnixFSFile(t, fPath, false) + } + t.Cleanup(func() { _ = os.Remove(path) }) + + payloadCid := rootLink.(cidlink.Link).Cid + + ba := shared_testutil.NewTestStorageBlockstoreAccessor() + bs, err := stores.ReadOnlyFilestore(path) + require.NoError(t, err) + ba.Blockstore = bs + t.Cleanup(func() { _ = bs.Close() }) + + // create provider and client + clientDs := namespace.Wrap(td.Ds1, datastore.NewKey("/deals/client")) + client, err := storageimpl.NewClient( + network.NewFromLibp2pHost(td.Host1, network.RetryParameters(0, 0, 0, 0)), + deps.DTClient, + deps.PeerResolver, + clientDs, + deps.ClientNode, + ba, + storageimpl.DealPollingInterval(0), + ) + require.NoError(t, err) + + providerDs := namespace.Wrap(td.Ds1, datastore.NewKey("/deals/provider")) + networkOptions := []network.Option{network.RetryParameters(0, 0, 0, 0)} + if disableNewDeals { + networkOptions = append(networkOptions, + network.SupportedAskProtocols([]protocol.ID{storagemarket.OldAskProtocolID}), + network.SupportedDealProtocols([]protocol.ID{storagemarket.DealProtocolID110}), + 
network.SupportedDealStatusProtocols([]protocol.ID{storagemarket.OldDealStatusProtocolID}), + ) + } + + rp := shared_testutil.NewMockIndexProvider() + + provider, err := storageimpl.NewProvider( + network.NewFromLibp2pHost(td.Host2, networkOptions...), + providerDs, + deps.Fs, + deps.DagStore, + rp, + deps.PieceStore, + deps.DTProvider, + deps.ProviderNode, + deps.ProviderAddr, + deps.StoredAsk, + &MeshCreatorStub{}, + ) + assert.NoError(t, err) + + // set ask price where we'll accept any price + err = provider.SetAsk(big.NewInt(0), big.NewInt(0), 50000) + assert.NoError(t, err) + + return &StorageHarness{ + StorageDependencies: deps, + PayloadCid: payloadCid, + Client: client, + Provider: provider, + Data: bs, + ReferenceProvider: rp, + } +} + +func (h *StorageHarness) CreateNewProvider(t *testing.T, ctx context.Context, td *shared_testutil.Libp2pTestData) storagemarket.StorageProvider { + gs2 := graphsyncimpl.New(ctx, gsnetwork.NewFromLibp2pHost(td.Host2), td.LinkSystem2) + dtTransport2 := dtgstransport.NewTransport(td.Host2.ID(), gs2) + dt2, err := dtimpl.NewDataTransfer(td.DTStore2, td.DTNet2, dtTransport2) + require.NoError(t, err) + testutil.StartAndWaitForReady(ctx, t, dt2) + + providerDs := namespace.Wrap(td.Ds1, datastore.NewKey("/deals/provider")) + pi := shared_testutil.NewMockIndexProvider() + + provider, err := storageimpl.NewProvider( + network.NewFromLibp2pHost(td.Host2, network.RetryParameters(0, 0, 0, 0)), + providerDs, + h.Fs, + h.DagStore, + pi, + h.PieceStore, + dt2, + h.ProviderNode, + h.ProviderAddr, + h.StoredAsk, + &MeshCreatorStub{}, + ) + require.NoError(t, err) + return provider +} + +func (h *StorageHarness) ProposeStorageDeal(t *testing.T, dataRef *storagemarket.DataRef, fastRetrieval, verifiedDeal bool) *storagemarket.ProposeStorageDealResult { + var dealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) + + result, err := h.Client.ProposeStorageDeal(h.Ctx, storagemarket.ProposeStorageDealParams{ + Addr: h.ClientAddr, + Info: 
&h.ProviderInfo, + Data: dataRef, + StartEpoch: h.Epoch + 100, + EndEpoch: h.Epoch + 100 + dealDuration, + Price: big.NewInt(1), + Collateral: big.NewInt(0), + Rt: abi.RegisteredSealProof_StackedDrg2KiBV1, + FastRetrieval: fastRetrieval, + VerifiedDeal: verifiedDeal, + }) + require.NoError(t, err) + return result +} + +func (h *StorageHarness) WaitForProviderEvent(wg *sync.WaitGroup, waitEvent storagemarket.ProviderEvent) { + wg.Add(1) + h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { + if event == waitEvent { + wg.Done() + } + }) +} + +func (h *StorageHarness) WaitForClientEvent(wg *sync.WaitGroup, waitEvent storagemarket.ClientEvent) { + wg.Add(1) + h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { + if event == waitEvent { + wg.Done() + } + }) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testnodes/testnodes.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testnodes/testnodes.go new file mode 100644 index 00000000000..d51187efcf4 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/testnodes/testnodes.go @@ -0,0 +1,429 @@ +// Package testnodes contains stubbed implementations of the StorageProviderNode +// and StorageClientNode interface to simulate communications with a filecoin node +package testnodes + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "sync" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + + 
"github.com/filecoin-project/go-fil-markets/commp" + "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// Below fake node implementations + +// StorageMarketState represents a state for the storage market that can be inspected +// - methods on the provider nodes will affect this state +type StorageMarketState struct { + TipSetToken shared.TipSetToken + Epoch abi.ChainEpoch + DealID abi.DealID + Balances map[address.Address]abi.TokenAmount + Providers map[address.Address]*storagemarket.StorageProviderInfo +} + +// NewStorageMarketState returns a new empty state for the storage market +func NewStorageMarketState() *StorageMarketState { + return &StorageMarketState{ + Epoch: 0, + DealID: 0, + Balances: map[address.Address]abi.TokenAmount{}, + Providers: map[address.Address]*storagemarket.StorageProviderInfo{}, + } +} + +// AddFunds adds funds for a given address in the storage market +func (sma *StorageMarketState) AddFunds(addr address.Address, amount abi.TokenAmount) { + if existing, ok := sma.Balances[addr]; ok { + sma.Balances[addr] = big.Add(existing, amount) + } else { + sma.Balances[addr] = amount + } +} + +// Balance returns the balance of a given address in the market +func (sma *StorageMarketState) Balance(addr address.Address) storagemarket.Balance { + if existing, ok := sma.Balances[addr]; ok { + return storagemarket.Balance{Locked: big.NewInt(0), Available: existing} + } + return storagemarket.Balance{Locked: big.NewInt(0), Available: big.NewInt(0)} +} + +// StateKey returns a state key with the storage market states set Epoch +func (sma *StorageMarketState) StateKey() (shared.TipSetToken, abi.ChainEpoch) { + return sma.TipSetToken, sma.Epoch +} + +// FakeCommonNode implements common methods for the storage & client node adapters +// where responses are stubbed +type FakeCommonNode struct { + SMState *StorageMarketState + 
DealFunds *shared_testutil.TestDealFunds + AddFundsCid cid.Cid + ReserveFundsError error + VerifySignatureFails bool + GetBalanceError error + GetChainHeadError error + SignBytesError error + PreCommittedSectorNumber abi.SectorNumber + PreCommittedIsActive bool + DealPreCommittedSyncError error + DealPreCommittedAsyncError error + DealCommittedSyncError error + DealCommittedAsyncError error + WaitForDealCompletionError error + OnDealExpiredError error + OnDealSlashedError error + OnDealSlashedEpoch abi.ChainEpoch + + WaitForMessageBlocks bool + WaitForMessageError error + WaitForMessageExitCode exitcode.ExitCode + WaitForMessageRetBytes []byte + WaitForMessageFinalCid cid.Cid + WaitForMessageNodeError error + WaitForMessageCalls []cid.Cid + + DelayFakeCommonNode DelayFakeCommonNode +} + +// DelayFakeCommonNode allows configuring delay in the FakeCommonNode functions +type DelayFakeCommonNode struct { + OnDealSectorPreCommitted bool + OnDealSectorPreCommittedChan chan struct{} + + OnDealSectorCommitted bool + OnDealSectorCommittedChan chan struct{} + + OnDealExpiredOrSlashed bool + OnDealExpiredOrSlashedChan chan struct{} + + ValidatePublishedDeal bool + ValidatePublishedDealChan chan struct{} +} + +// GetChainHead returns the state id in the storage market state +func (n *FakeCommonNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { + if n.GetChainHeadError == nil { + key, epoch := n.SMState.StateKey() + return key, epoch, nil + } + + return []byte{}, 0, n.GetChainHeadError +} + +// AddFunds adds funds to the given actor in the storage market state +func (n *FakeCommonNode) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { + n.SMState.AddFunds(addr, amount) + return n.AddFundsCid, nil +} + +// ReserveFunds reserves funds required for a deal with the storage market actor +func (n *FakeCommonNode) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) 
(cid.Cid, error) { + if n.ReserveFundsError == nil { + _, _ = n.DealFunds.Reserve(amt) + balance := n.SMState.Balance(addr) + if balance.Available.LessThan(amt) { + return n.AddFunds(ctx, addr, big.Sub(amt, balance.Available)) + } + } + + return cid.Undef, n.ReserveFundsError +} + +// ReleaseFunds releases funds reserved with ReserveFunds +func (n *FakeCommonNode) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error { + n.DealFunds.Release(amt) + return nil +} + +// WaitForMessage simulates waiting for a message to appear on chain +func (n *FakeCommonNode) WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, cid.Cid, error) error) error { + n.WaitForMessageCalls = append(n.WaitForMessageCalls, mcid) + + if n.WaitForMessageError != nil { + return n.WaitForMessageError + } + + if n.WaitForMessageBlocks { + // just leave the test node in this state to simulate a long operation + return nil + } + + finalCid := n.WaitForMessageFinalCid + if finalCid.Equals(cid.Undef) { + finalCid = mcid + } + + return onCompletion(n.WaitForMessageExitCode, n.WaitForMessageRetBytes, finalCid, n.WaitForMessageNodeError) +} + +// GetBalance returns the funds in the storage market state +func (n *FakeCommonNode) GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (storagemarket.Balance, error) { + if n.GetBalanceError == nil { + return n.SMState.Balance(addr), nil + } + return storagemarket.Balance{}, n.GetBalanceError +} + +// VerifySignature just always returns true, for now +func (n *FakeCommonNode) VerifySignature(ctx context.Context, signature crypto.Signature, addr address.Address, data []byte, tok shared.TipSetToken) (bool, error) { + return !n.VerifySignatureFails, nil +} + +// SignBytes simulates signing data by returning a test signature +func (n *FakeCommonNode) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { + if n.SignBytesError == nil 
{ + return shared_testutil.MakeTestSignature(), nil + } + return nil, n.SignBytesError +} + +func (n *FakeCommonNode) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) { + return abi.NewTokenAmount(5000), builtin.TotalFilecoin, nil +} + +// OnDealSectorPreCommitted returns immediately, and returns stubbed errors +func (n *FakeCommonNode) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal market.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error { + if n.DelayFakeCommonNode.OnDealSectorPreCommitted { + select { + case <-ctx.Done(): + return ctx.Err() + case <-n.DelayFakeCommonNode.OnDealSectorPreCommittedChan: + } + } + if n.DealPreCommittedSyncError == nil { + cb(n.PreCommittedSectorNumber, n.PreCommittedIsActive, n.DealPreCommittedAsyncError) + } + return n.DealPreCommittedSyncError +} + +// OnDealSectorCommitted returns immediately, and returns stubbed errors +func (n *FakeCommonNode) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error { + if n.DelayFakeCommonNode.OnDealSectorCommitted { + select { + case <-ctx.Done(): + return ctx.Err() + case <-n.DelayFakeCommonNode.OnDealSectorCommittedChan: + } + } + if n.DealCommittedSyncError == nil { + cb(n.DealCommittedAsyncError) + } + return n.DealCommittedSyncError +} + +// OnDealExpiredOrSlashed simulates waiting for a deal to be expired or slashed, but provides stubbed behavior +func (n *FakeCommonNode) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { + if n.DelayFakeCommonNode.OnDealExpiredOrSlashed { + select { + case <-ctx.Done(): + return ctx.Err() + case 
<-n.DelayFakeCommonNode.OnDealExpiredOrSlashedChan: + } + } + + if n.WaitForDealCompletionError != nil { + return n.WaitForDealCompletionError + } + + if n.OnDealSlashedError != nil { + onDealSlashed(abi.ChainEpoch(0), n.OnDealSlashedError) + return nil + } + + if n.OnDealExpiredError != nil { + onDealExpired(n.OnDealExpiredError) + return nil + } + + if n.OnDealSlashedEpoch == 0 { + onDealExpired(nil) + return nil + } + + onDealSlashed(n.OnDealSlashedEpoch, nil) + return nil +} + +var _ storagemarket.StorageCommon = (*FakeCommonNode)(nil) + +// FakeClientNode is a node adapter for a storage client whose responses +// are stubbed +type FakeClientNode struct { + FakeCommonNode + ClientAddr address.Address + MinerAddr address.Address + WorkerAddr address.Address + ValidationError error + ValidatePublishedDealID abi.DealID + ValidatePublishedError error + ExpectedMinerInfos []address.Address + receivedMinerInfos []address.Address +} + +// ListStorageProviders lists the providers in the storage market state +func (n *FakeClientNode) ListStorageProviders(ctx context.Context, tok shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) { + providers := make([]*storagemarket.StorageProviderInfo, 0, len(n.SMState.Providers)) + for _, provider := range n.SMState.Providers { + providers = append(providers, provider) + } + return providers, nil +} + +// ValidatePublishedDeal always succeeds +func (n *FakeClientNode) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) { + if n.DelayFakeCommonNode.ValidatePublishedDeal { + select { + case <-ctx.Done(): + return 0, ctx.Err() + case <-n.DelayFakeCommonNode.ValidatePublishedDealChan: + } + } + + return n.ValidatePublishedDealID, n.ValidatePublishedError +} + +// SignProposal signs a deal with a dummy signature +func (n *FakeClientNode) SignProposal(ctx context.Context, signer address.Address, proposal market.DealProposal) (*market.ClientDealProposal, error) { + return 
&market.ClientDealProposal{ + Proposal: proposal, + ClientSignature: *shared_testutil.MakeTestSignature(), + }, nil +} + +// GetDefaultWalletAddress returns a stubbed ClientAddr +func (n *FakeClientNode) GetDefaultWalletAddress(ctx context.Context) (address.Address, error) { + return n.ClientAddr, nil +} + +// GetMinerInfo returns stubbed information for the first miner in storage market state +func (n *FakeClientNode) GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (*storagemarket.StorageProviderInfo, error) { + n.receivedMinerInfos = append(n.receivedMinerInfos, maddr) + info, ok := n.SMState.Providers[maddr] + if !ok { + return nil, errors.New("Provider not found") + } + return info, nil +} + +func (n *FakeClientNode) VerifyExpectations(t *testing.T) { + require.Equal(t, n.ExpectedMinerInfos, n.receivedMinerInfos) +} + +var _ storagemarket.StorageClientNode = (*FakeClientNode)(nil) + +// FakeProviderNode implements functions specific to the StorageProviderNode +type FakeProviderNode struct { + FakeCommonNode + MinerAddr address.Address + MinerWorkerError error + PieceLength uint64 + PieceSectorID uint64 + PublishDealID abi.DealID + PublishDealsError error + WaitForPublishDealsError error + OnDealCompleteError error + OnDealCompleteSkipCommP bool + LastOnDealCompleteBytes []byte + OnDealCompleteCalls []storagemarket.MinerDeal + LocatePieceForDealWithinSectorError error + DataCap *verifreg.DataCap + GetDataCapErr error + + lk sync.Mutex + Sealed map[abi.SectorNumber]bool +} + +// PublishDeals simulates publishing a deal by adding it to the storage market state +func (n *FakeProviderNode) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { + if n.PublishDealsError == nil { + return shared_testutil.GenerateCids(1)[0], nil + } + return cid.Undef, n.PublishDealsError +} + +// WaitForPublishDeals simulates waiting for the deal to be published and +// calling the callback with the results +func (n 
*FakeProviderNode) WaitForPublishDeals(ctx context.Context, mcid cid.Cid, proposal market.DealProposal) (*storagemarket.PublishDealsWaitResult, error) { + if n.WaitForPublishDealsError != nil { + return nil, n.WaitForPublishDealsError + } + + finalCid := n.WaitForMessageFinalCid + if finalCid.Equals(cid.Undef) { + finalCid = mcid + } + + return &storagemarket.PublishDealsWaitResult{ + DealID: n.PublishDealID, + FinalCid: finalCid, + }, nil +} + +// OnDealComplete simulates passing of the deal to the storage miner, and does nothing +func (n *FakeProviderNode) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader shared.ReadSeekStarter) (*storagemarket.PackingResult, error) { + n.OnDealCompleteCalls = append(n.OnDealCompleteCalls, deal) + n.LastOnDealCompleteBytes, _ = ioutil.ReadAll(pieceReader) + + if n.OnDealCompleteError != nil || n.OnDealCompleteSkipCommP { + return &storagemarket.PackingResult{}, n.OnDealCompleteError + } + + // We read in all the bytes from the reader above, so seek back to the start + err := pieceReader.SeekStart() + if err != nil { + return nil, fmt.Errorf("on deal complete: seeking to start of piece data: %w", err) + } + + // Generate commP + pieceCID, err := commp.GenerateCommp(pieceReader, uint64(pieceSize), uint64(pieceSize)) + if err != nil { + return nil, fmt.Errorf("on deal complete: generating commp: %w", err) + } + + // Check that commP of the data matches the proposal piece CID + if pieceCID != deal.Proposal.PieceCID { + return nil, fmt.Errorf("on deal complete: proposal piece CID %s does not match calculated commP %s", deal.Proposal.PieceCID, pieceCID) + } + + return &storagemarket.PackingResult{}, n.OnDealCompleteError +} + +// GetMinerWorkerAddress returns the address specified by MinerAddr +func (n *FakeProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { + if n.MinerWorkerError == nil { + 
return n.MinerAddr, nil + } + return address.Undef, n.MinerWorkerError +} + +// GetDataCap gets the current data cap for addr +func (n *FakeProviderNode) GetDataCap(ctx context.Context, addr address.Address, tok shared.TipSetToken) (*verifreg.DataCap, error) { + return n.DataCap, n.GetDataCapErr +} + +// GetProofType returns the miner's proof type. +func (n *FakeProviderNode) GetProofType(ctx context.Context, addr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) { + return abi.RegisteredSealProof_StackedDrg2KiBV1, nil +} + +var _ storagemarket.StorageProviderNode = (*FakeProviderNode)(nil) diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types.go new file mode 100644 index 00000000000..f0068dd26bd --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types.go @@ -0,0 +1,321 @@ +package storagemarket + +import ( + "fmt" + "time" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/go-fil-markets/filestore" +) + +var log = logging.Logger("storagemrkt") + +//go:generate cbor-gen-for --map-encoding ClientDeal MinerDeal Balance SignedStorageAsk StorageAsk DataRef ProviderDealState DealStages DealStage Log + +// The ID for the libp2p protocol for proposing storage deals. 
+const DealProtocolID101 = "/fil/storage/mk/1.0.1" +const DealProtocolID110 = "/fil/storage/mk/1.1.0" +const DealProtocolID111 = "/fil/storage/mk/1.1.1" + +// AskProtocolID is the ID for the libp2p protocol for querying miners for their current StorageAsk. +const OldAskProtocolID = "/fil/storage/ask/1.0.1" +const AskProtocolID = "/fil/storage/ask/1.1.0" + +// DealStatusProtocolID is the ID for the libp2p protocol for querying miners for the current status of a deal. +const OldDealStatusProtocolID = "/fil/storage/status/1.0.1" +const DealStatusProtocolID = "/fil/storage/status/1.1.0" + +// Balance represents a current balance of funds in the StorageMarketActor. +type Balance struct { + Locked abi.TokenAmount + Available abi.TokenAmount +} + +// StorageAsk defines the parameters by which a miner will choose to accept or +// reject a deal. Note: making a storage deal proposal which matches the miner's +// ask is a precondition, but not sufficient to ensure the deal is accepted (the +// storage provider may run its own decision logic). 
+type StorageAsk struct { + // Price per GiB / Epoch + Price abi.TokenAmount + VerifiedPrice abi.TokenAmount + + MinPieceSize abi.PaddedPieceSize + MaxPieceSize abi.PaddedPieceSize + Miner address.Address + Timestamp abi.ChainEpoch + Expiry abi.ChainEpoch + SeqNo uint64 +} + +// SignedStorageAsk is an ask signed by the miner's private key +type SignedStorageAsk struct { + Ask *StorageAsk + Signature *crypto.Signature +} + +// SignedStorageAskUndefined represents the empty value for SignedStorageAsk +var SignedStorageAskUndefined = SignedStorageAsk{} + +// StorageAskOption allows custom configuration of a storage ask +type StorageAskOption func(*StorageAsk) + +// MinPieceSize configures a minimum piece size of a StorageAsk +func MinPieceSize(minPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MinPieceSize = minPieceSize + } +} + +// MaxPieceSize configures maxiumum piece size of a StorageAsk +func MaxPieceSize(maxPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MaxPieceSize = maxPieceSize + } +} + +// StorageAskUndefined represents an empty value for StorageAsk +var StorageAskUndefined = StorageAsk{} + +type ClientDealProposal = market.ClientDealProposal + +// MinerDeal is the local state tracked for a deal by a StorageProvider +type MinerDeal struct { + ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client peer.ID + State StorageDealStatus + PiecePath filestore.Path + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + FundsReserved abi.TokenAmount + Ref *DataRef + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime + + TransferChannelId *datatransfer.ChannelID + SectorNumber abi.SectorNumber + + InboundCAR string + RemoteFilepath string + Worker string +} + +// NewDealStages creates a new DealStages object ready to be used. +// EXPERIMENTAL; subject to change. 
+func NewDealStages() *DealStages { + return &DealStages{} +} + +// DealStages captures a timeline of the progress of a deal, grouped by stages. +// EXPERIMENTAL; subject to change. +type DealStages struct { + // Stages contains an entry for every stage that the deal has gone through. + // Each stage then contains logs. + Stages []*DealStage +} + +// DealStages captures data about the execution of a deal stage. +// EXPERIMENTAL; subject to change. +type DealStage struct { + // Human-readable fields. + // TODO: these _will_ need to be converted to canonical representations, so + // they are machine readable. + Name string + Description string + ExpectedDuration string + + // Timestamps. + // TODO: may be worth adding an exit timestamp. It _could_ be inferred from + // the start of the next stage, or from the timestamp of the last log line + // if this is a terminal stage. But that's non-determistic and it relies on + // assumptions. + CreatedTime cbg.CborTime + UpdatedTime cbg.CborTime + + // Logs contains a detailed timeline of events that occurred inside + // this stage. + Logs []*Log +} + +// Log represents a point-in-time event that occurred inside a deal stage. +// EXPERIMENTAL; subject to change. +type Log struct { + // Log is a human readable message. + // + // TODO: this _may_ need to be converted to a canonical data model so it + // is machine-readable. + Log string + + UpdatedTime cbg.CborTime +} + +// GetStage returns the DealStage object for a named stage, or nil if not found. +// +// TODO: the input should be a strongly-typed enum instead of a free-form string. +// TODO: drop Get from GetStage to make this code more idiomatic. Return a +// second ok boolean to make it even more idiomatic. +// EXPERIMENTAL; subject to change. 
+func (ds *DealStages) GetStage(stage string) *DealStage { + if ds == nil { + return nil + } + + for _, s := range ds.Stages { + if s.Name == stage { + return s + } + } + + return nil +} + +// AddStageLog adds a log to the specified stage, creating the stage if it +// doesn't exist yet. +// EXPERIMENTAL; subject to change. +func (ds *DealStages) AddStageLog(stage, description, expectedDuration, msg string) { + if ds == nil { + return + } + + log.Debugf("adding log for stage <%s> msg <%s>", stage, msg) + + now := curTime() + st := ds.GetStage(stage) + if st == nil { + st = &DealStage{ + CreatedTime: now, + } + ds.Stages = append(ds.Stages, st) + } + + st.Name = stage + st.Description = description + st.ExpectedDuration = expectedDuration + st.UpdatedTime = now + if msg != "" && (len(st.Logs) == 0 || st.Logs[len(st.Logs)-1].Log != msg) { + // only add the log if it's not a duplicate. + st.Logs = append(st.Logs, &Log{msg, now}) + } +} + +// AddLog adds a log inside the DealStages object of the deal. +// EXPERIMENTAL; subject to change. +func (d *ClientDeal) AddLog(msg string, a ...interface{}) { + if len(a) > 0 { + msg = fmt.Sprintf(msg, a...) 
+ } + + stage := DealStates[d.State] + description := DealStatesDescriptions[d.State] + expectedDuration := DealStatesDurations[d.State] + + d.DealStages.AddStageLog(stage, description, expectedDuration, msg) +} + +// ClientDeal is the local state tracked for a deal by a StorageClient +type ClientDeal struct { + market.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + State StorageDealStatus + Miner peer.ID + MinerWorker address.Address + DealID abi.DealID + DataRef *DataRef + Message string + DealStages *DealStages + PublishMessage *cid.Cid + SlashEpoch abi.ChainEpoch + PollRetryCount uint64 + PollErrorCount uint64 + FastRetrieval bool + FundsReserved abi.TokenAmount + CreationTime cbg.CborTime + TransferChannelID *datatransfer.ChannelID + SectorNumber abi.SectorNumber +} + +// StorageProviderInfo describes on chain information about a StorageProvider +// (use QueryAsk to determine more specific deal parameters) +type StorageProviderInfo struct { + Address address.Address // actor address + Owner address.Address + Worker address.Address // signs messages + SectorSize uint64 + PeerID peer.ID + Addrs []ma.Multiaddr +} + +// ProposeStorageDealResult returns the result for a proposing a deal +type ProposeStorageDealResult struct { + ProposalCid cid.Cid +} + +// ProposeStorageDealParams describes the parameters for proposing a storage deal +type ProposeStorageDealParams struct { + Addr address.Address + Info *StorageProviderInfo + Data *DataRef + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + Price abi.TokenAmount + Collateral abi.TokenAmount + Rt abi.RegisteredSealProof + FastRetrieval bool + VerifiedDeal bool +} + +const ( + // TTGraphsync means data for a deal will be transferred by graphsync + TTGraphsync = "graphsync" + + // TTManual means data for a deal will be transferred manually and imported + // on the provider + TTManual = "manual" +) + +// DataRef is a reference for how data will be transferred for a given storage deal +type DataRef 
struct { + TransferType string + Root cid.Cid + + PieceCid *cid.Cid // Optional for non-manual transfer, will be recomputed from the data if not given + PieceSize abi.UnpaddedPieceSize // Optional for non-manual transfer, will be recomputed from the data if not given + RawBlockSize uint64 // Optional: used as the denominator when calculating transfer % +} + +// ProviderDealState represents a Provider's current state of a deal +type ProviderDealState struct { + State StorageDealStatus + Message string + Proposal *market.DealProposal + ProposalCid *cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + DealID abi.DealID + FastRetrieval bool +} + +func curTime() cbg.CborTime { + now := time.Now() + return cbg.CborTime(time.Unix(0, now.UnixNano()).UTC()) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types_cbor_gen.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types_cbor_gen.go new file mode 100644 index 00000000000..57b493fd973 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types_cbor_gen.go @@ -0,0 +1,3141 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package storagemarket + +import ( + "fmt" + "io" + "math" + "sort" + + datatransfer "github.com/filecoin-project/go-data-transfer" + filestore "github.com/filecoin-project/go-fil-markets/filestore" + abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" + crypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *ClientDeal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{179}); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + 
} + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.MinerWorker (address.Address) (struct) + if len("MinerWorker") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWorker\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinerWorker"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinerWorker")); err != nil { + return err + } + + if err := t.MinerWorker.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) 
+ if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DataRef (storagemarket.DataRef) (struct) + if len("DataRef") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DataRef\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DataRef"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DataRef")); err != nil { + return err + } + + if err := t.DataRef.MarshalCBOR(cw); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.DealStages (storagemarket.DealStages) (struct) + if len("DealStages") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealStages\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealStages"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealStages")); err != nil { + return err + } + + if err := t.DealStages.MarshalCBOR(cw); err != nil { + return err + } + + // t.PublishMessage (cid.Cid) (struct) + if len("PublishMessage") > 
cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishMessage\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishMessage")); err != nil { + return err + } + + if t.PublishMessage == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.PollRetryCount (uint64) (uint64) + if len("PollRetryCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollRetryCount\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PollRetryCount"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PollRetryCount")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollRetryCount)); err != nil { + return err + } + + // t.PollErrorCount (uint64) (uint64) + if len("PollErrorCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollErrorCount\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PollErrorCount"))); err != nil { + return err + } + if _, err := io.WriteString(w, 
string("PollErrorCount")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PollErrorCount)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.TransferChannelID (datatransfer.ChannelID) (struct) + if len("TransferChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferChannelID")); err != nil { + return err + } + + if err := t.TransferChannelID.MarshalCBOR(cw); err != nil { + return err + } 
+ + // t.SectorNumber (abi.SectorNumber) (uint64) + if len("SectorNumber") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorNumber\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorNumber"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SectorNumber")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + return nil +} + +func (t *ClientDeal) UnmarshalCBOR(r io.Reader) (err error) { + *t = ClientDeal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDeal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // 
t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Miner (peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.MinerWorker (address.Address) (struct) + case "MinerWorker": + + { + + if err := t.MinerWorker.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWorker: %w", err) + } + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DataRef (storagemarket.DataRef) (struct) + case "DataRef": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DataRef = new(DataRef) + if err := t.DataRef.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DataRef pointer: %w", err) + } + } + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.DealStages (storagemarket.DealStages) (struct) + case "DealStages": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealStages = new(DealStages) + if err := t.DealStages.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealStages pointer: %w", err) + } + } + + } + // t.PublishMessage (cid.Cid) (struct) + case "PublishMessage": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); 
err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.PollRetryCount (uint64) (uint64) + case "PollRetryCount": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollRetryCount = uint64(extra) + + } + // t.PollErrorCount (uint64) (uint64) + case "PollErrorCount": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollErrorCount = uint64(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if 
err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.TransferChannelID (datatransfer.ChannelID) (struct) + case "TransferChannelID": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.TransferChannelID = new(datatransfer.ChannelID) + if err := t.TransferChannelID.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelID pointer: %w", err) + } + } + + } + // t.SectorNumber (abi.SectorNumber) (uint64) + case "SectorNumber": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *MinerDeal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{180}); err != nil { + return err + } + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err 
:= io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := 
io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len("Client") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Client\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Client"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Client")); err != nil { + return err + } + + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Client))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Client)); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len("PiecePath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PiecePath\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PiecePath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PiecePath")); err != nil { + return err + } + + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.PiecePath)); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len("MetadataPath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MetadataPath\" was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MetadataPath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MetadataPath")); err != nil { + return err + } + + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.MetadataPath)); err != nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in 
field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(cw); err != nil { + return err + } + + // t.Ref (storagemarket.DataRef) (struct) + if len("Ref") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ref\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ref"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ref")); err != nil { + return err + } + + if err := t.Ref.MarshalCBOR(cw); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if len("AvailableForRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AvailableForRetrieval\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AvailableForRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AvailableForRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + 
return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.TransferChannelId (datatransfer.ChannelID) (struct) + if len("TransferChannelId") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelId\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferChannelId"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferChannelId")); err != nil { + return err + } + + if err := t.TransferChannelId.MarshalCBOR(cw); err != nil { + return err + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + if len("SectorNumber") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorNumber\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorNumber"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SectorNumber")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.InboundCAR (string) (string) + if len("InboundCAR") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"InboundCAR\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("InboundCAR"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("InboundCAR")); err != nil { + return err + } + + if len(t.InboundCAR) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.InboundCAR was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.InboundCAR))); err != 
nil { + return err + } + if _, err := io.WriteString(w, string(t.InboundCAR)); err != nil { + return err + } + return nil +} + +func (t *MinerDeal) UnmarshalCBOR(r io.Reader) (err error) { + *t = MinerDeal{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("MinerDeal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.Miner 
(peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.Client (peer.ID) (string) + case "Client": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.PiecePath (filestore.Path) (string) + case "PiecePath": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.MetadataPath (filestore.Path) (string) + case "MetadataPath": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return 
err + } + + t.Message = string(sval) + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.Ref (storagemarket.DataRef) (struct) + case "Ref": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ref = new(DataRef) + if err := t.Ref.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } + + } + // t.AvailableForRetrieval (bool) (bool) + case "AvailableForRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.TransferChannelId (datatransfer.ChannelID) (struct) + case "TransferChannelId": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.TransferChannelId = new(datatransfer.ChannelID) + if err := t.TransferChannelId.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelId pointer: %w", err) + } + } + + } + // t.SectorNumber 
(abi.SectorNumber) (uint64) + case "SectorNumber": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.InboundCAR (string) (string) + case "InboundCAR": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.InboundCAR = string(sval) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Balance) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Locked (big.Int) (struct) + if len("Locked") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Locked\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Locked"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Locked")); err != nil { + return err + } + + if err := t.Locked.MarshalCBOR(cw); err != nil { + return err + } + + // t.Available (big.Int) (struct) + if len("Available") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Available\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Available"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Available")); err != nil { + return err + } + + if err := t.Available.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Balance) UnmarshalCBOR(r io.Reader) (err error) { + *t = Balance{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > 
cbg.MaxLength { + return fmt.Errorf("Balance: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Locked (big.Int) (struct) + case "Locked": + + { + + if err := t.Locked.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Locked: %w", err) + } + + } + // t.Available (big.Int) (struct) + case "Available": + + { + + if err := t.Available.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Available: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Ask (storagemarket.StorageAsk) (struct) + if len("Ask") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ask")); err != nil { + return err + } + + if err := t.Ask.MarshalCBOR(cw); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *SignedStorageAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = SignedStorageAsk{} + + cr := cbg.NewCborReader(r) + + maj, 
extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SignedStorageAsk: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ask (storagemarket.StorageAsk) (struct) + case "Ask": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Ask = new(StorageAsk) + if err := t.Ask.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *StorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{168}); err != nil { + return err + } + + // t.Price (big.Int) (struct) + if len("Price") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Price\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Price"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Price")); err != nil { + return err + } + + if err := 
t.Price.MarshalCBOR(cw); err != nil { + return err + } + + // t.VerifiedPrice (big.Int) (struct) + if len("VerifiedPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VerifiedPrice\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("VerifiedPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VerifiedPrice")); err != nil { + return err + } + + if err := t.VerifiedPrice.MarshalCBOR(cw); err != nil { + return err + } + + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + if len("MinPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinPieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MinPieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinPieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MinPieceSize)); err != nil { + return err + } + + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + if len("MaxPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MaxPieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MaxPieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.MaxPieceSize)); err != nil { + return err + } + + // t.Miner (address.Address) (struct) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if err := t.Miner.MarshalCBOR(cw); err != nil { + return err + } + + // t.Timestamp (abi.ChainEpoch) (int64) + if len("Timestamp") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"Timestamp\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Timestamp"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Timestamp")); err != nil { + return err + } + + if t.Timestamp >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + + // t.Expiry (abi.ChainEpoch) (int64) + if len("Expiry") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Expiry\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Expiry"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Expiry")); err != nil { + return err + } + + if t.Expiry >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Expiry)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Expiry-1)); err != nil { + return err + } + } + + // t.SeqNo (uint64) (uint64) + if len("SeqNo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SeqNo\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SeqNo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SeqNo")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SeqNo)); err != nil { + return err + } + + return nil +} + +func (t *StorageAsk) UnmarshalCBOR(r io.Reader) (err error) { + *t = StorageAsk{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("StorageAsk: map struct too 
large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Price (big.Int) (struct) + case "Price": + + { + + if err := t.Price.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Price: %w", err) + } + + } + // t.VerifiedPrice (big.Int) (struct) + case "VerifiedPrice": + + { + + if err := t.VerifiedPrice.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedPrice: %w", err) + } + + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + case "MinPieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MinPieceSize = abi.PaddedPieceSize(extra) + + } + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + case "MaxPieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPieceSize = abi.PaddedPieceSize(extra) + + } + // t.Miner (address.Address) (struct) + case "Miner": + + { + + if err := t.Miner.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Timestamp (abi.ChainEpoch) (int64) + case "Timestamp": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = abi.ChainEpoch(extraI) + } + // t.Expiry (abi.ChainEpoch) (int64) + case "Expiry": + { + maj, extra, err := 
cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiry = abi.ChainEpoch(extraI) + } + // t.SeqNo (uint64) (uint64) + case "SeqNo": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SeqNo = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DataRef) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{165}); err != nil { + return err + } + + // t.TransferType (string) (string) + if len("TransferType") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferType\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TransferType"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferType")); err != nil { + return err + } + + if len(t.TransferType) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.TransferType was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TransferType))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.TransferType)); err != nil { + return err + } + + // t.Root (cid.Cid) (struct) + if len("Root") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Root\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Root"))); err != nil { + return err + } + if 
_, err := io.WriteString(w, string("Root")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.Root); err != nil { + return xerrors.Errorf("failed to write cid field t.Root: %w", err) + } + + // t.PieceCid (cid.Cid) (struct) + if len("PieceCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCid")); err != nil { + return err + } + + if t.PieceCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PieceCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCid: %w", err) + } + } + + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + if len("PieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { + return err + } + + // t.RawBlockSize (uint64) (uint64) + if len("RawBlockSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RawBlockSize\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RawBlockSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("RawBlockSize")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.RawBlockSize)); err != nil { + return err + } + + return nil +} + +func (t *DataRef) UnmarshalCBOR(r io.Reader) (err error) { + *t = DataRef{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = 
io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DataRef: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.TransferType (string) (string) + case "TransferType": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.TransferType = string(sval) + } + // t.Root (cid.Cid) (struct) + case "Root": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Root: %w", err) + } + + t.Root = c + + } + // t.PieceCid (cid.Cid) (struct) + case "PieceCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) + } + + t.PieceCid = &c + } + + } + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + case "PieceSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceSize = abi.UnpaddedPieceSize(extra) + + } + // t.RawBlockSize (uint64) (uint64) + case "RawBlockSize": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RawBlockSize = uint64(extra) + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err 
:= cw.Write([]byte{168}); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.Proposal (market.DealProposal) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Proposal")); err != nil { + return err + } + + if err := t.Proposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if t.ProposalCid == nil { + if _, err := cw.Write(cbg.CborNull); err != 
nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + 
+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) (err error) { + *t = ProviderDealState{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (market.DealProposal) (struct) + case "Proposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Proposal = new(market.DealProposal) + if err := t.Proposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Proposal pointer: %w", err) + } + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err 
:= cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = &c + } + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStages) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{161}); err != nil { + return err + } + + // t.Stages ([]*storagemarket.DealStage) (slice) + if len("Stages") > 
cbg.MaxLength { + return xerrors.Errorf("Value in field \"Stages\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Stages"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Stages")); err != nil { + return err + } + + if len(t.Stages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Stages was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Stages))); err != nil { + return err + } + for _, v := range t.Stages { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *DealStages) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStages{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStages: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Stages ([]*storagemarket.DealStage) (slice) + case "Stages": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Stages: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Stages = make([]*DealStage, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealStage + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Stages[i] = &v + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealStage) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := 
w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + // t.Name (string) (string) + if len("Name") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Name\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Name"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Name")); err != nil { + return err + } + + if len(t.Name) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Name was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Name)); err != nil { + return err + } + + // t.Description (string) (string) + if len("Description") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Description\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Description"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Description")); err != nil { + return err + } + + if len(t.Description) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Description was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Description))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Description)); err != nil { + return err + } + + // t.ExpectedDuration (string) (string) + if len("ExpectedDuration") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ExpectedDuration\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ExpectedDuration"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ExpectedDuration")); err != nil { + return err + } + + if len(t.ExpectedDuration) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ExpectedDuration was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ExpectedDuration))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ExpectedDuration)); err != nil { + return err + } + + // t.CreatedTime (typegen.CborTime) (struct) + if len("CreatedTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreatedTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreatedTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreatedTime")); err != nil { + return err + } + + if err := t.CreatedTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.UpdatedTime (typegen.CborTime) (struct) + if len("UpdatedTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdatedTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UpdatedTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UpdatedTime")); err != nil { + return err + } + + if err := t.UpdatedTime.MarshalCBOR(cw); err != nil { + return err + } + + // t.Logs ([]*storagemarket.Log) (slice) + if len("Logs") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Logs\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Logs"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Logs")); err != nil { + return err + } + + if len(t.Logs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Logs was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Logs))); err != nil { + return err + } + for _, v := range t.Logs { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *DealStage) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealStage{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = 
io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealStage: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Name (string) (string) + case "Name": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Name = string(sval) + } + // t.Description (string) (string) + case "Description": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Description = string(sval) + } + // t.ExpectedDuration (string) (string) + case "ExpectedDuration": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.ExpectedDuration = string(sval) + } + // t.CreatedTime (typegen.CborTime) (struct) + case "CreatedTime": + + { + + if err := t.CreatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CreatedTime: %w", err) + } + + } + // t.UpdatedTime (typegen.CborTime) (struct) + case "UpdatedTime": + + { + + if err := t.UpdatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UpdatedTime: %w", err) + } + + } + // t.Logs ([]*storagemarket.Log) (slice) + case "Logs": + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Logs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Logs = make([]*Log, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Log + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Logs[i] = &v + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *Log) MarshalCBOR(w io.Writer) 
error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.Log (string) (string) + if len("Log") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Log\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Log"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Log")); err != nil { + return err + } + + if len(t.Log) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Log was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Log))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Log)); err != nil { + return err + } + + // t.UpdatedTime (typegen.CborTime) (struct) + if len("UpdatedTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdatedTime\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UpdatedTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UpdatedTime")); err != nil { + return err + } + + if err := t.UpdatedTime.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { + *t = Log{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Log: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Log (string) (string) + case "Log": + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Log = 
string(sval) + } + // t.UpdatedTime (typegen.CborTime) (struct) + case "UpdatedTime": + + { + + if err := t.UpdatedTime.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.UpdatedTime: %w", err) + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types_test.go new file mode 100644 index 00000000000..8b5df7dfa18 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/storagemarket/types_test.go @@ -0,0 +1,13 @@ +package storagemarket_test + +import ( + "testing" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +func TestDealStagesNil(t *testing.T) { + var ds *storagemarket.DealStages + ds.GetStage("none") // no panic. + ds.AddStageLog("MyStage", "desc", "duration", "msg") // no panic. +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/dagstore.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/dagstore.go new file mode 100644 index 00000000000..eb30d507237 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/dagstore.go @@ -0,0 +1,87 @@ +package stores + +import ( + "context" + "io" + + "github.com/ipfs/go-cid" + bstore "github.com/ipfs/go-ipfs-blockstore" + carindex "github.com/ipld/go-car/v2/index" + + "github.com/filecoin-project/dagstore" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +type ClosableBlockstore interface { + bstore.Blockstore + io.Closer +} + +// DAGStoreWrapper hides the details of the DAG store implementation from +// the other parts of go-fil-markets. 
+type DAGStoreWrapper interface { + // RegisterShard loads a CAR file into the DAG store and builds an + // index for it, sending the result on the supplied channel on completion + RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error + + // LoadShard fetches the data for a shard and provides a blockstore + // interface to it. + // + // The blockstore must be closed to release the shard. + LoadShard(ctx context.Context, pieceCid cid.Cid) (ClosableBlockstore, error) + + // MigrateDeals migrates the supplied storage deals into the DAG store. + MigrateDeals(ctx context.Context, deals []storagemarket.MinerDeal) (bool, error) + + // GetPiecesContainingBlock returns the CID of all pieces that contain + // the block with the given CID + GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) + + GetIterableIndexForPiece(pieceCid cid.Cid) (carindex.IterableIndex, error) + + // DestroyShard initiates the registration of a new shard. + // + // This method returns an error synchronously if preliminary validation fails. + // Otherwise, it queues the shard for destruction. The caller should monitor + // supplied channel for a result. + DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error + + // Close closes the dag store wrapper. + Close() error +} + +// RegisterShardSync calls the DAGStore RegisterShard method and waits +// synchronously in a dedicated channel until the registration has completed +// fully. +func RegisterShardSync(ctx context.Context, ds DAGStoreWrapper, pieceCid cid.Cid, carPath string, eagerInit bool) error { + resch := make(chan dagstore.ShardResult, 1) + if err := ds.RegisterShard(ctx, pieceCid, carPath, eagerInit, resch); err != nil { + return err + } + + // TODO: Can I rely on RegisterShard to return an error if the context times out? 
+ select { + case <-ctx.Done(): + return ctx.Err() + case res := <-resch: + return res.Error + } +} + +// DestroyShardSync calls the DAGStore DestroyShard method and waits +// synchronously in a dedicated channel until the shard has been destroyed completely. +func DestroyShardSync(ctx context.Context, ds DAGStoreWrapper, pieceCid cid.Cid) error { + resch := make(chan dagstore.ShardResult, 1) + + if err := ds.DestroyShard(ctx, pieceCid, resch); err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case res := <-resch: + return res.Error + } +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/error.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/error.go new file mode 100644 index 00000000000..cc9a4767ec7 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/error.go @@ -0,0 +1,9 @@ +package stores + +import "golang.org/x/xerrors" + +var ErrNotFound = xerrors.New("not found") + +func IsNotFound(err error) bool { + return xerrors.Is(err, ErrNotFound) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/filestore.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/filestore.go new file mode 100644 index 00000000000..c15114db425 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/filestore.go @@ -0,0 +1,163 @@ +package stores + +import ( + "context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "github.com/ipfs/go-filestore" + bstore "github.com/ipfs/go-ipfs-blockstore" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + mh "github.com/multiformats/go-multihash" + "golang.org/x/xerrors" +) + +// ReadOnlyFilestore opens the CAR in the specified path as as a read-only +// blockstore, and fronts it with a Filestore whose positional mappings are +// stored inside the CAR itself. It must be closed after done. 
+func ReadOnlyFilestore(path string) (ClosableBlockstore, error) { + ro, err := OpenReadOnly(path, + carv2.ZeroLengthSectionAsEOF(true), + blockstore.UseWholeCIDs(true), + ) + + if err != nil { + return nil, err + } + + bs, err := FilestoreOf(ro) + if err != nil { + return nil, err + } + + return &closableBlockstore{Blockstore: bs, closeFn: ro.Close}, nil +} + +// ReadWriteFilestore opens the CAR in the specified path as as a read-write +// blockstore, and fronts it with a Filestore whose positional mappings are +// stored inside the CAR itself. It must be closed after done. Closing will +// finalize the CAR blockstore. +func ReadWriteFilestore(path string, roots ...cid.Cid) (ClosableBlockstore, error) { + rw, err := OpenReadWrite(path, roots, + carv2.ZeroLengthSectionAsEOF(true), + blockstore.UseWholeCIDs(true), + ) + if err != nil { + return nil, err + } + + bs, err := FilestoreOf(rw) + if err != nil { + return nil, err + } + + return &closableBlockstore{Blockstore: bs, closeFn: rw.Finalize}, nil +} + +// FilestoreOf returns a FileManager/Filestore backed entirely by a +// blockstore without requiring a datastore. It achieves this by coercing the +// blockstore into a datastore. The resulting blockstore is suitable for usage +// with DagBuilderHelper with DagBuilderParams#NoCopy=true. +func FilestoreOf(bs bstore.Blockstore) (bstore.Blockstore, error) { + coercer := &dsCoercer{bs} + + // the FileManager stores positional infos (positional mappings) in a + // datastore, which in our case is the blockstore coerced into a datastore. + // + // Passing the root dir as a base path makes me uneasy, but these filestores + // are only used locally. + fm := filestore.NewFileManager(coercer, "/") + fm.AllowFiles = true + + // the Filestore sifts leaves (PosInfos) from intermediate nodes. 
It writes + // PosInfo leaves to the datastore (which in our case is the coerced + // blockstore), and the intermediate nodes to the blockstore proper (since + // they cannot be mapped to the file. + fstore := filestore.NewFilestore(bs, fm) + bs = bstore.NewIdStore(fstore) + + return bs, nil +} + +var cidBuilder = cid.V1Builder{Codec: cid.Raw, MhType: mh.SHA2_256} + +// dsCoercer coerces a Blockstore to present a datastore interface, apt for +// usage with the Filestore/FileManager. Only PosInfos will be written through +// this path. +type dsCoercer struct { + bstore.Blockstore +} + +var _ datastore.Batching = (*dsCoercer)(nil) + +func (crcr *dsCoercer) Get(ctx context.Context, key datastore.Key) (value []byte, err error) { + c, err := cidBuilder.Sum(key.Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to create cid: %w", err) + } + + blk, err := crcr.Blockstore.Get(ctx, c) + if err != nil { + return nil, xerrors.Errorf("failed to get cid %s: %w", c, err) + } + return blk.RawData(), nil +} + +func (crcr *dsCoercer) Put(ctx context.Context, key datastore.Key, value []byte) error { + c, err := cidBuilder.Sum(key.Bytes()) + if err != nil { + return xerrors.Errorf("failed to create cid: %w", err) + } + blk, err := blocks.NewBlockWithCid(value, c) + if err != nil { + return xerrors.Errorf("failed to create block: %w", err) + } + if err := crcr.Blockstore.Put(ctx, blk); err != nil { + return xerrors.Errorf("failed to put block: %w", err) + } + return nil +} + +func (crcr *dsCoercer) Has(ctx context.Context, key datastore.Key) (exists bool, err error) { + c, err := cidBuilder.Sum(key.Bytes()) + if err != nil { + return false, xerrors.Errorf("failed to create cid: %w", err) + } + return crcr.Blockstore.Has(ctx, c) +} + +func (crcr *dsCoercer) Batch(_ context.Context) (datastore.Batch, error) { + return datastore.NewBasicBatch(crcr), nil +} + +func (crcr *dsCoercer) GetSize(_ context.Context, _ datastore.Key) (size int, err error) { + return 0, 
xerrors.New("operation NOT supported: GetSize") +} + +func (crcr *dsCoercer) Query(_ context.Context, _ query.Query) (query.Results, error) { + return nil, xerrors.New("operation NOT supported: Query") +} + +func (crcr *dsCoercer) Delete(_ context.Context, _ datastore.Key) error { + return xerrors.New("operation NOT supported: Delete") +} + +func (crcr *dsCoercer) Sync(_ context.Context, _ datastore.Key) error { + return xerrors.New("operation NOT supported: Sync") +} + +func (crcr *dsCoercer) Close() error { + return nil +} + +type closableBlockstore struct { + bstore.Blockstore + closeFn func() error +} + +func (c *closableBlockstore) Close() error { + return c.closeFn() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/filestore_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/filestore_test.go new file mode 100644 index 00000000000..873edfe1eb8 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/filestore_test.go @@ -0,0 +1,151 @@ +package stores + +import ( + "context" + "io" + "io/ioutil" + "math/rand" + "os" + "testing" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + bstore "github.com/ipfs/go-ipfs-blockstore" + offline "github.com/ipfs/go-ipfs-exchange-offline" + files "github.com/ipfs/go-ipfs-files" + "github.com/ipfs/go-merkledag" + unixfile "github.com/ipfs/go-unixfs/file" + "github.com/ipld/go-car/v2/blockstore" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-fil-markets/shared_testutil/unixfs" +) + +func TestFilestoreRoundtrip(t *testing.T) { + ctx := context.Background() + normalFilePath, origBytes := createFile(t, 10, 10485760) + + // write out a unixfs dag to an inmemory store to get the root. + root := writeUnixfsDAGInmemory(t, normalFilePath) + + // write out a unixfs dag to a file store backed by a CAR file. 
+ tmpCARv2, err := os.CreateTemp(t.TempDir(), "rand") + require.NoError(t, err) + require.NoError(t, tmpCARv2.Close()) + + // writing a filestore, and then using it as as source. + fs, err := ReadWriteFilestore(tmpCARv2.Name(), root) + require.NoError(t, err) + + dagSvc := merkledag.NewDAGService(blockservice.New(fs, offline.Exchange(fs))) + root2 := unixfs.WriteUnixfsDAGTo(t, normalFilePath, dagSvc) + require.NoError(t, fs.Close()) + require.Equal(t, root, root2) + + // it works if we use a Filestore backed by the given CAR file + fs, err = ReadOnlyFilestore(tmpCARv2.Name()) + require.NoError(t, err) + + fbz, err := dagToNormalFile(t, ctx, root, fs) + require.NoError(t, err) + require.NoError(t, fs.Close()) + + // assert contents are equal + require.EqualValues(t, origBytes, fbz) +} + +func TestReadOnlyFilestoreWithDenseCARFile(t *testing.T) { + ctx := context.Background() + normalFilePath, origContent := createFile(t, 10, 10485760) + + // write out a unixfs dag to an inmemory store to get the root. + root := writeUnixfsDAGInmemory(t, normalFilePath) + + // write out a unixfs dag to a read-write CARv2 blockstore to get the full CARv2 file. + tmpCARv2, err := os.CreateTemp(t.TempDir(), "rand") + require.NoError(t, err) + require.NoError(t, tmpCARv2.Close()) + + bs, err := blockstore.OpenReadWrite(tmpCARv2.Name(), []cid.Cid{root}) + require.NoError(t, err) + + dagSvc := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + root2 := unixfs.WriteUnixfsDAGTo(t, normalFilePath, dagSvc) + require.NoError(t, bs.Finalize()) + require.Equal(t, root, root2) + + // Open a read only filestore with the full CARv2 file + fs, err := ReadOnlyFilestore(tmpCARv2.Name()) + require.NoError(t, err) + + // write out the normal file using the Filestore and assert the contents match. 
+ finalBytes, err := dagToNormalFile(t, ctx, root, fs) + require.NoError(t, err) + require.NoError(t, fs.Close()) + + require.EqualValues(t, origContent, finalBytes) +} + +func dagToNormalFile(t *testing.T, ctx context.Context, root cid.Cid, bs bstore.Blockstore) ([]byte, error) { + outputF, err := os.CreateTemp(t.TempDir(), "rand") + if err != nil { + return nil, err + } + + bsvc := blockservice.New(bs, offline.Exchange(bs)) + dag := merkledag.NewDAGService(bsvc) + nd, err := dag.Get(ctx, root) + if err != nil { + return nil, err + } + + file, err := unixfile.NewUnixfsFile(ctx, dag, nd) + if err != nil { + return nil, err + } + if err := files.WriteTo(file, outputF.Name()); err != nil { + return nil, err + } + + if _, err = outputF.Seek(0, io.SeekStart); err != nil { + return nil, err + } + finalBytes, err := ioutil.ReadAll(outputF) + if err != nil { + return nil, err + } + + if err := outputF.Close(); err != nil { + return nil, err + } + + return finalBytes, nil +} + +func createFile(t *testing.T, rseed int64, size int64) (path string, contents []byte) { + source := io.LimitReader(rand.New(rand.NewSource(rseed)), size) + + file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat") + require.NoError(t, err) + + n, err := io.Copy(file, source) + require.NoError(t, err) + require.EqualValues(t, n, size) + + _, err = file.Seek(0, io.SeekStart) + require.NoError(t, err) + bz, err := ioutil.ReadAll(file) + require.NoError(t, err) + require.NoError(t, file.Close()) + + return file.Name(), bz +} + +func writeUnixfsDAGInmemory(t *testing.T, path string) cid.Cid { + bs := bstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + dagSvc := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + root := unixfs.WriteUnixfsDAGTo(t, path, dagSvc) + return root +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/kvcarbs.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/kvcarbs.go new file mode 100644 index 00000000000..f29f39761bd --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/kvcarbs.go @@ -0,0 +1,1676 @@ +package stores + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "sync" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipld/go-car/util" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/index" + "github.com/multiformats/go-multicodec" + "github.com/multiformats/go-multihash" + "github.com/multiformats/go-varint" + "github.com/petar/GoLLRB/llrb" + cborg "github.com/whyrusleeping/cbor/go" + "golang.org/x/exp/mmap" +) + +/* + + This file contains extracted parts of CARv2 blockstore, modified to allow + storage of arbitrary data indexed by ID CIDs. + + This was allowed by go-car prior to v2.1.0, but newer go-car releases + require that data matches the multihash, which means that the library can + no longer be exploited as a KV store as is done in filestore.go. + + We duplicate the code here temporarily, as an alternative to breaking + existing nodes, or adding an option to go-car which would break the CAR spec + (it also contains this hack to a single repo). + + Ideally we should migrate to a real KV store, but even for that we'll still + need this code for the migration process. 
+ +*/ + +// Modified vs go-car/v2 +func isIdentity(cid.Cid) (digest []byte, ok bool, err error) { + /* + dmh, err := multihash.Decode(key.Hash()) + if err != nil { + return nil, false, err + } + ok = dmh.Code == multihash.IDENTITY + digest = dmh.Digest + return digest, ok, nil + */ + + // This is the hack filestore datastore needs to use CARs as a KV store + return nil, false, err +} + +// Code below was copied from go-car/v2 + +var ( + _ io.ReaderAt = (*OffsetReadSeeker)(nil) + _ io.ReadSeeker = (*OffsetReadSeeker)(nil) +) + +// OffsetReadSeeker implements Read, and ReadAt on a section +// of an underlying io.ReaderAt. +// The main difference between io.SectionReader and OffsetReadSeeker is that +// NewOffsetReadSeeker does not require the user to know the number of readable bytes. +// +// It also partially implements Seek, where the implementation panics if io.SeekEnd is passed. +// This is because, OffsetReadSeeker does not know the end of the file therefore cannot seek relative +// to it. +type OffsetReadSeeker struct { + r io.ReaderAt + base int64 + off int64 +} + +// NewOffsetReadSeeker returns an OffsetReadSeeker that reads from r +// starting offset offset off and stops with io.EOF when r reaches its end. +// The Seek function will panic if whence io.SeekEnd is passed. 
+func NewOffsetReadSeeker(r io.ReaderAt, off int64) *OffsetReadSeeker { + return &OffsetReadSeeker{r, off, off} +} + +func (o *OffsetReadSeeker) Read(p []byte) (n int, err error) { + n, err = o.r.ReadAt(p, o.off) + o.off += int64(n) + return +} + +func (o *OffsetReadSeeker) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, io.EOF + } + off += o.base + return o.r.ReadAt(p, off) +} + +func (o *OffsetReadSeeker) ReadByte() (byte, error) { + b := []byte{0} + _, err := o.Read(b) + return b[0], err +} + +func (o *OffsetReadSeeker) Offset() int64 { + return o.off +} + +func (o *OffsetReadSeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + o.off = offset + o.base + case io.SeekCurrent: + o.off += offset + case io.SeekEnd: + panic("unsupported whence: SeekEnd") + } + return o.Position(), nil +} + +// Position returns the current position of this reader relative to the initial offset. +func (o *OffsetReadSeeker) Position() int64 { + return o.off - o.base +} + +var ( + _ io.Writer = (*OffsetWriteSeeker)(nil) + _ io.WriteSeeker = (*OffsetWriteSeeker)(nil) +) + +type OffsetWriteSeeker struct { + w io.WriterAt + base int64 + offset int64 +} + +func NewOffsetWriter(w io.WriterAt, off int64) *OffsetWriteSeeker { + return &OffsetWriteSeeker{w, off, off} +} + +func (ow *OffsetWriteSeeker) Write(b []byte) (n int, err error) { + n, err = ow.w.WriteAt(b, ow.offset) + ow.offset += int64(n) + return +} + +func (ow *OffsetWriteSeeker) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + ow.offset = offset + ow.base + case io.SeekCurrent: + ow.offset += offset + case io.SeekEnd: + panic("unsupported whence: SeekEnd") + } + return ow.Position(), nil +} + +// Position returns the current position of this writer relative to the initial offset, i.e. the number of bytes written. 
+func (ow *OffsetWriteSeeker) Position() int64 { + return ow.offset - ow.base +} + +type BytesReader interface { + io.Reader + io.ByteReader +} + +func ReadNode(r io.Reader, zeroLenAsEOF bool) (cid.Cid, []byte, error) { + data, err := LdRead(r, zeroLenAsEOF) + if err != nil { + return cid.Cid{}, nil, err + } + + n, c, err := cid.CidFromBytes(data) + if err != nil { + return cid.Cid{}, nil, err + } + + return c, data[n:], nil +} + +func LdWrite(w io.Writer, d ...[]byte) error { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + + buf := make([]byte, 8) + n := varint.PutUvarint(buf, sum) + _, err := w.Write(buf[:n]) + if err != nil { + return err + } + + for _, s := range d { + _, err = w.Write(s) + if err != nil { + return err + } + } + + return nil +} + +func LdSize(d ...[]byte) uint64 { + var sum uint64 + for _, s := range d { + sum += uint64(len(s)) + } + s := varint.UvarintSize(sum) + return sum + uint64(s) +} + +func LdRead(r io.Reader, zeroLenAsEOF bool) ([]byte, error) { + l, err := varint.ReadUvarint(ToByteReader(r)) + if err != nil { + // If the length of bytes read is non-zero when the error is EOF then signal an unclean EOF. 
+ if l > 0 && err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + return nil, err + } else if l == 0 && zeroLenAsEOF { + return nil, io.EOF + } + + buf := make([]byte, l) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, err + } + + return buf, nil +} + +var ( + _ io.ByteReader = (*readerPlusByte)(nil) + _ io.ByteReader = (*readSeekerPlusByte)(nil) + _ io.ByteReader = (*discardingReadSeekerPlusByte)(nil) + _ io.ReadSeeker = (*discardingReadSeekerPlusByte)(nil) + _ io.ReaderAt = (*readSeekerAt)(nil) +) + +type ( + readerPlusByte struct { + io.Reader + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + readSeekerPlusByte struct { + io.ReadSeeker + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + discardingReadSeekerPlusByte struct { + io.Reader + offset int64 + + byteBuf [1]byte // escapes via io.Reader.Read; preallocate + } + + ByteReadSeeker interface { + io.ReadSeeker + io.ByteReader + } + + readSeekerAt struct { + rs io.ReadSeeker + mu sync.Mutex + } +) + +func ToByteReader(r io.Reader) io.ByteReader { + if br, ok := r.(io.ByteReader); ok { + return br + } + return &readerPlusByte{Reader: r} +} + +func ToByteReadSeeker(r io.Reader) ByteReadSeeker { + if brs, ok := r.(ByteReadSeeker); ok { + return brs + } + if rs, ok := r.(io.ReadSeeker); ok { + return &readSeekerPlusByte{ReadSeeker: rs} + } + return &discardingReadSeekerPlusByte{Reader: r} +} + +func ToReaderAt(rs io.ReadSeeker) io.ReaderAt { + if ra, ok := rs.(io.ReaderAt); ok { + return ra + } + return &readSeekerAt{rs: rs} +} + +func (rb *readerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(rb, rb.byteBuf[:]) + return rb.byteBuf[0], err +} + +func (rsb *readSeekerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(rsb, rsb.byteBuf[:]) + return rsb.byteBuf[0], err +} + +func (drsb *discardingReadSeekerPlusByte) ReadByte() (byte, error) { + _, err := io.ReadFull(drsb, drsb.byteBuf[:]) + return drsb.byteBuf[0], err +} + +func (drsb 
*discardingReadSeekerPlusByte) Read(p []byte) (read int, err error) { + read, err = drsb.Reader.Read(p) + drsb.offset += int64(read) + return +} + +func (drsb *discardingReadSeekerPlusByte) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + n := offset - drsb.offset + if n < 0 { + panic("unsupported rewind via whence: io.SeekStart") + } + _, err := io.CopyN(ioutil.Discard, drsb, n) + return drsb.offset, err + case io.SeekCurrent: + _, err := io.CopyN(ioutil.Discard, drsb, offset) + return drsb.offset, err + default: + panic("unsupported whence: io.SeekEnd") + } +} + +func (rsa *readSeekerAt) ReadAt(p []byte, off int64) (n int, err error) { + rsa.mu.Lock() + defer rsa.mu.Unlock() + if _, err := rsa.rs.Seek(off, io.SeekStart); err != nil { + return 0, err + } + return rsa.rs.Read(p) +} + +func init() { + cbor.RegisterCborType(CarHeader{}) +} + +type Store interface { + Put(blocks.Block) error +} + +type ReadStore interface { + Get(cid.Cid) (blocks.Block, error) +} + +type CarHeader struct { + Roots []cid.Cid + Version uint64 +} + +type carWriter struct { + ds format.NodeGetter + w io.Writer +} + +func WriteCar(ctx context.Context, ds format.NodeGetter, roots []cid.Cid, w io.Writer) error { + h := &CarHeader{ + Roots: roots, + Version: 1, + } + + if err := WriteHeader(h, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + + cw := &carWriter{ds: ds, w: w} + seen := cid.NewSet() + for _, r := range roots { + if err := merkledag.Walk(ctx, cw.enumGetLinks, r, seen.Visit); err != nil { + return err + } + } + return nil +} + +func ReadHeader(r io.Reader) (*CarHeader, error) { + hb, err := LdRead(r, false) + if err != nil { + return nil, err + } + + var ch CarHeader + if err := cbor.DecodeInto(hb, &ch); err != nil { + return nil, fmt.Errorf("invalid header: %v", err) + } + + return &ch, nil +} + +func WriteHeader(h *CarHeader, w io.Writer) error { + hb, err := cbor.DumpObject(h) + if err != nil { + return 
err + } + + return util.LdWrite(w, hb) +} + +func HeaderSize(h *CarHeader) (uint64, error) { + hb, err := cbor.DumpObject(h) + if err != nil { + return 0, err + } + + return util.LdSize(hb), nil +} + +func (cw *carWriter) enumGetLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { + nd, err := cw.ds.Get(ctx, c) + if err != nil { + return nil, err + } + + if err := cw.writeNode(ctx, nd); err != nil { + return nil, err + } + + return nd.Links(), nil +} + +func (cw *carWriter) writeNode(ctx context.Context, nd format.Node) error { + return util.LdWrite(cw.w, nd.Cid().Bytes(), nd.RawData()) +} + +type CarReader struct { + r io.Reader + Header *CarHeader + zeroLenAsEOF bool +} + +func NewCarReaderWithZeroLengthSectionAsEOF(r io.Reader) (*CarReader, error) { + return newCarReader(r, true) +} + +func NewCarReader(r io.Reader) (*CarReader, error) { + return newCarReader(r, false) +} + +func newCarReader(r io.Reader, zeroLenAsEOF bool) (*CarReader, error) { + ch, err := ReadHeader(r) + if err != nil { + return nil, err + } + + if ch.Version != 1 { + return nil, fmt.Errorf("invalid car version: %d", ch.Version) + } + + if len(ch.Roots) == 0 { + return nil, fmt.Errorf("empty car, no roots") + } + + return &CarReader{ + r: r, + Header: ch, + zeroLenAsEOF: zeroLenAsEOF, + }, nil +} + +func (cr *CarReader) Next() (blocks.Block, error) { + c, data, err := ReadNode(cr.r, cr.zeroLenAsEOF) + if err != nil { + return nil, err + } + + hashed, err := c.Prefix().Sum(data) + if err != nil { + return nil, err + } + + if !hashed.Equals(c) { + return nil, fmt.Errorf("mismatch in content integrity, name: %s, data: %s", c, hashed) + } + + return blocks.NewBlockWithCid(data, c) +} + +type batchStore interface { + PutMany([]blocks.Block) error +} + +func LoadCar(s Store, r io.Reader) (*CarHeader, error) { + cr, err := NewCarReader(r) + if err != nil { + return nil, err + } + + if bs, ok := s.(batchStore); ok { + return loadCarFast(bs, cr) + } + + return loadCarSlow(s, cr) +} + +func 
loadCarFast(s batchStore, cr *CarReader) (*CarHeader, error) { + var buf []blocks.Block + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + if len(buf) > 0 { + if err := s.PutMany(buf); err != nil { + return nil, err + } + } + return cr.Header, nil + } + return nil, err + } + + buf = append(buf, blk) + + if len(buf) > 1000 { + if err := s.PutMany(buf); err != nil { + return nil, err + } + buf = buf[:0] + } + } +} + +func loadCarSlow(s Store, cr *CarReader) (*CarHeader, error) { + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + return cr.Header, nil + } + return nil, err + } + + if err := s.Put(blk); err != nil { + return nil, err + } + } +} + +// Matches checks whether two headers match. +// Two headers are considered matching if: +// 1. They have the same version number, and +// 2. They contain the same root CIDs in any order. +// Note, this function explicitly ignores the order of roots. +// If order of roots matter use reflect.DeepEqual instead. +func (h CarHeader) Matches(other CarHeader) bool { + if h.Version != other.Version { + return false + } + thisLen := len(h.Roots) + if thisLen != len(other.Roots) { + return false + } + // Headers with a single root are popular. + // Implement a fast execution path for popular cases. + if thisLen == 1 { + return h.Roots[0].Equals(other.Roots[0]) + } + + // Check other contains all roots. + // TODO: should this be optimised for cases where the number of roots are large since it has O(N^2) complexity? 
+ for _, r := range h.Roots { + if !other.containsRoot(r) { + return false + } + } + return true +} + +func (h *CarHeader) containsRoot(root cid.Cid) bool { + for _, r := range h.Roots { + if r.Equals(root) { + return true + } + } + return false +} + +var _ blockstore.Blockstore = (*ReadOnly)(nil) + +var ( + errZeroLengthSection = fmt.Errorf("zero-length carv2 section not allowed by default; see WithZeroLengthSectionAsEOF option") + errReadOnly = fmt.Errorf("called write method on a read-only carv2 blockstore") + errClosed = fmt.Errorf("cannot use a carv2 blockstore after closing") +) + +// ReadOnly provides a read-only CAR Block Store. +type ReadOnly struct { + // mu allows ReadWrite to be safe for concurrent use. + // It's in ReadOnly so that read operations also grab read locks, + // given that ReadWrite embeds ReadOnly for methods like Get and Has. + // + // The main fields guarded by the mutex are the index and the underlying writers. + // For simplicity, the entirety of the blockstore methods grab the mutex. + mu sync.RWMutex + + // When true, the blockstore has been closed via Close, Discard, or + // Finalize, and must not be used. Any further blockstore method calls + // will return errClosed to avoid panics or broken behavior. + closed bool + + // The backing containing the data payload in CARv1 format. + backing io.ReaderAt + // The CARv1 content index. + idx index.Index + + // If we called carv2.NewReaderMmap, remember to close it too. + carv2Closer io.Closer + + opts carv2.Options +} + +type contextKey string + +const asyncErrHandlerKey contextKey = "asyncErrorHandlerKey" + +// UseWholeCIDs is a read option which makes a CAR blockstore identify blocks by +// whole CIDs, and not just their multihashes. The default is to use +// multihashes, which matches the current semantics of go-ipfs-blockstore v1. 
+// +// Enabling this option affects a number of methods, including read-only ones: +// +// • Get, Has, and HasSize will only return a block +// only if the entire CID is present in the CAR file. +// +// • AllKeysChan will return the original whole CIDs, instead of with their +// multicodec set to "raw" to just provide multihashes. +// +// • If AllowDuplicatePuts isn't set, +// Put and PutMany will deduplicate by the whole CID, +// allowing different CIDs with equal multihashes. +// +// Note that this option only affects the blockstore, and is ignored by the root +// go-car/v2 package. +func UseWholeCIDs(enable bool) carv2.Option { + return func(o *carv2.Options) { + o.BlockstoreUseWholeCIDs = enable + } +} + +// NewReadOnly creates a new ReadOnly blockstore from the backing with a optional index as idx. +// This function accepts both CARv1 and CARv2 backing. +// The blockstore is instantiated with the given index if it is not nil. +// +// Otherwise: +// * For a CARv1 backing an index is generated. +// * For a CARv2 backing an index is only generated if Header.HasIndex returns false. +// +// There is no need to call ReadOnly.Close on instances returned by this function. +func NewReadOnly(backing io.ReaderAt, idx index.Index, opts ...carv2.Option) (*ReadOnly, error) { + b := &ReadOnly{ + opts: carv2.ApplyOptions(opts...), + } + + version, err := readVersion(backing) + if err != nil { + return nil, err + } + switch version { + case 1: + if idx == nil { + if idx, err = generateIndex(backing, opts...); err != nil { + return nil, err + } + } + b.backing = backing + b.idx = idx + return b, nil + case 2: + v2r, err := carv2.NewReader(backing, opts...) 
+ if err != nil { + return nil, err + } + if idx == nil { + if v2r.Header.HasIndex() { + r, err := v2r.IndexReader() + if err != nil { + return nil, err + } + idx, err = index.ReadFrom(r) + if err != nil { + return nil, err + } + } else { + r, err := v2r.DataReader() + if err != nil { + return nil, err + } + idx, err = generateIndex(r, opts...) + if err != nil { + return nil, err + } + } + } + drBacking, err := v2r.DataReader() + if err != nil { + return nil, err + } + b.backing = drBacking + b.idx = idx + return b, nil + default: + return nil, fmt.Errorf("unsupported car version: %v", version) + } +} + +func readVersion(at io.ReaderAt) (uint64, error) { + var rr io.Reader + switch r := at.(type) { + case io.Reader: + rr = r + default: + rr = NewOffsetReadSeeker(r, 0) + } + return carv2.ReadVersion(rr) +} + +func generateIndex(at io.ReaderAt, opts ...carv2.Option) (index.Index, error) { + var rs io.ReadSeeker + switch r := at.(type) { + case io.ReadSeeker: + rs = r + default: + rs = NewOffsetReadSeeker(r, 0) + } + + // Note, we do not set any write options so that all write options fall back onto defaults. + return carv2.GenerateIndex(rs, opts...) +} + +// OpenReadOnly opens a read-only blockstore from a CAR file (either v1 or v2), generating an index if it does not exist. +// Note, the generated index if the index does not exist is ephemeral and only stored in memory. +// See car.GenerateIndex and Index.Attach for persisting index onto a CAR file. +func OpenReadOnly(path string, opts ...carv2.Option) (*ReadOnly, error) { + f, err := mmap.Open(path) + if err != nil { + return nil, err + } + + robs, err := NewReadOnly(f, nil, opts...) + if err != nil { + return nil, err + } + robs.carv2Closer = f + + return robs, nil +} + +func (b *ReadOnly) readBlock(idx int64) (cid.Cid, []byte, error) { + bcid, data, err := ReadNode(NewOffsetReadSeeker(b.backing, idx), b.opts.ZeroLengthSectionAsEOF) + return bcid, data, err +} + +// DeleteBlock is unsupported and always errors. 
+func (b *ReadOnly) DeleteBlock(_ context.Context, _ cid.Cid) error { + return errReadOnly +} + +// Has indicates if the store contains a block that corresponds to the given key. +// This function always returns true for any given key with multihash.IDENTITY code. +func (b *ReadOnly) Has(ctx context.Context, key cid.Cid) (bool, error) { + // Check if the given CID has multihash.IDENTITY code + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. + if _, ok, err := isIdentity(key); err != nil { + return false, err + } else if ok { + return true, nil + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return false, errClosed + } + + var fnFound bool + var fnErr error + err := b.idx.GetAll(key, func(offset uint64) bool { + uar := NewOffsetReadSeeker(b.backing, int64(offset)) + var err error + _, err = varint.ReadUvarint(uar) + if err != nil { + fnErr = err + return false + } + _, readCid, err := cid.CidFromReader(uar) + if err != nil { + fnErr = err + return false + } + if b.opts.BlockstoreUseWholeCIDs { + fnFound = readCid.Equals(key) + return !fnFound // continue looking if we haven't found it + } else { + fnFound = bytes.Equal(readCid.Hash(), key.Hash()) + return false + } + }) + if errors.Is(err, index.ErrNotFound) { + return false, nil + } else if err != nil { + return false, err + } + return fnFound, fnErr +} + +// Get gets a block corresponding to the given key. +// This API will always return true if the given key has multihash.IDENTITY code. +func (b *ReadOnly) Get(ctx context.Context, key cid.Cid) (blocks.Block, error) { + // Check if the given CID has multihash.IDENTITY code + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. 
+ if digest, ok, err := isIdentity(key); err != nil { + return nil, err + } else if ok { + return blocks.NewBlockWithCid(digest, key) + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return nil, errClosed + } + + var fnData []byte + var fnErr error + err := b.idx.GetAll(key, func(offset uint64) bool { + readCid, data, err := b.readBlock(int64(offset)) + if err != nil { + fnErr = err + return false + } + if b.opts.BlockstoreUseWholeCIDs { + if readCid.Equals(key) { + fnData = data + return false + } else { + return true // continue looking + } + } else { + if bytes.Equal(readCid.Hash(), key.Hash()) { + fnData = data + } + return false + } + }) + if errors.Is(err, index.ErrNotFound) { + return nil, format.ErrNotFound{Cid: key} + } else if err != nil { + return nil, err + } else if fnErr != nil { + return nil, fnErr + } + if fnData == nil { + return nil, format.ErrNotFound{Cid: key} + } + return blocks.NewBlockWithCid(fnData, key) +} + +// GetSize gets the size of an item corresponding to the given key. +func (b *ReadOnly) GetSize(ctx context.Context, key cid.Cid) (int, error) { + // Check if the given CID has multihash.IDENTITY code + // Note, we do this without locking, since there is no shared information to lock for in order to perform the check. 
+ if digest, ok, err := isIdentity(key); err != nil { + return 0, err + } else if ok { + return len(digest), nil + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return 0, errClosed + } + + fnSize := -1 + var fnErr error + err := b.idx.GetAll(key, func(offset uint64) bool { + rdr := NewOffsetReadSeeker(b.backing, int64(offset)) + sectionLen, err := varint.ReadUvarint(rdr) + if err != nil { + fnErr = err + return false + } + cidLen, readCid, err := cid.CidFromReader(rdr) + if err != nil { + fnErr = err + return false + } + if b.opts.BlockstoreUseWholeCIDs { + if readCid.Equals(key) { + fnSize = int(sectionLen) - cidLen + return false + } else { + return true // continue looking + } + } else { + if bytes.Equal(readCid.Hash(), key.Hash()) { + fnSize = int(sectionLen) - cidLen + } + return false + } + }) + if errors.Is(err, index.ErrNotFound) { + return -1, format.ErrNotFound{Cid: key} + } else if err != nil { + return -1, err + } else if fnErr != nil { + return -1, fnErr + } + if fnSize == -1 { + return -1, format.ErrNotFound{Cid: key} + } + return fnSize, nil +} + +// Put is not supported and always returns an error. +func (b *ReadOnly) Put(context.Context, blocks.Block) error { + return errReadOnly +} + +// PutMany is not supported and always returns an error. +func (b *ReadOnly) PutMany(context.Context, []blocks.Block) error { + return errReadOnly +} + +// WithAsyncErrorHandler returns a context with async error handling set to the given errHandler. +// Any errors that occur during asynchronous operations of AllKeysChan will be passed to the given +// handler. +func WithAsyncErrorHandler(ctx context.Context, errHandler func(error)) context.Context { + return context.WithValue(ctx, asyncErrHandlerKey, errHandler) +} + +// AllKeysChan returns the list of keys in the CAR data payload. 
+// If the ctx is constructed using WithAsyncErrorHandler any errors that occur during asynchronous +// retrieval of CIDs will be passed to the error handler function set in context. +// Otherwise, errors will terminate the asynchronous operation silently. +// +// See WithAsyncErrorHandler +func (b *ReadOnly) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + // We release the lock when the channel-sending goroutine stops. + // Note that we can't use a deferred unlock here, + // because if we return a nil error, + // we only want to unlock once the async goroutine has stopped. + b.mu.RLock() + + if b.closed { + b.mu.RUnlock() // don't hold the mutex forever + return nil, errClosed + } + + // TODO we may use this walk for populating the index, and we need to be able to iterate keys in this way somewhere for index generation. In general though, when it's asked for all keys from a blockstore with an index, we should iterate through the index when possible rather than linear reads through the full car. + rdr := NewOffsetReadSeeker(b.backing, 0) + header, err := ReadHeader(rdr) + if err != nil { + b.mu.RUnlock() // don't hold the mutex forever + return nil, fmt.Errorf("error reading car header: %w", err) + } + headerSize, err := HeaderSize(header) + if err != nil { + b.mu.RUnlock() // don't hold the mutex forever + return nil, err + } + + // TODO: document this choice of 5, or use simpler buffering like 0 or 1. + ch := make(chan cid.Cid, 5) + + // Seek to the end of header. + if _, err = rdr.Seek(int64(headerSize), io.SeekStart); err != nil { + b.mu.RUnlock() // don't hold the mutex forever + return nil, err + } + + go func() { + defer b.mu.RUnlock() + defer close(ch) + + for { + length, err := varint.ReadUvarint(rdr) + if err != nil { + if err != io.EOF { + maybeReportError(ctx, err) + } + return + } + + // Null padding; by default it's an error. 
+ if length == 0 { + if b.opts.ZeroLengthSectionAsEOF { + break + } else { + maybeReportError(ctx, errZeroLengthSection) + return + } + } + + thisItemForNxt := rdr.Offset() + _, c, err := cid.CidFromReader(rdr) + if err != nil { + maybeReportError(ctx, err) + return + } + if _, err := rdr.Seek(thisItemForNxt+int64(length), io.SeekStart); err != nil { + maybeReportError(ctx, err) + return + } + + // If we're just using multihashes, flatten to the "raw" codec. + if !b.opts.BlockstoreUseWholeCIDs { + c = cid.NewCidV1(cid.Raw, c.Hash()) + } + + select { + case ch <- c: + case <-ctx.Done(): + maybeReportError(ctx, ctx.Err()) + return + } + } + }() + return ch, nil +} + +// maybeReportError checks if an error handler is present in context associated to the key +// asyncErrHandlerKey, and if preset it will pass the error to it. +func maybeReportError(ctx context.Context, err error) { + value := ctx.Value(asyncErrHandlerKey) + if eh, _ := value.(func(error)); eh != nil { + eh(err) + } +} + +// HashOnRead is currently unimplemented; hashing on reads never happens. +func (b *ReadOnly) HashOnRead(bool) { + // TODO: implement before the final release? +} + +// Roots returns the root CIDs of the backing CAR. +func (b *ReadOnly) Roots() ([]cid.Cid, error) { + header, err := ReadHeader(NewOffsetReadSeeker(b.backing, 0)) + if err != nil { + return nil, fmt.Errorf("error reading car header: %w", err) + } + return header.Roots, nil +} + +// Close closes the underlying reader if it was opened by OpenReadOnly. +// After this call, the blockstore can no longer be used. +// +// Note that this call may block if any blockstore operations are currently in +// progress, including an AllKeysChan that hasn't been fully consumed or cancelled. 
+func (b *ReadOnly) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + + return b.closeWithoutMutex() +} + +func (b *ReadOnly) closeWithoutMutex() error { + b.closed = true + if b.carv2Closer != nil { + return b.carv2Closer.Close() + } + return nil +} + +var ( + errUnsupported = errors.New("not supported") + insertionIndexCodec = multicodec.Code(0x300003) +) + +type ( + insertionIndex struct { + items llrb.LLRB + } + + recordDigest struct { + digest []byte + index.Record + } +) + +func (r recordDigest) Less(than llrb.Item) bool { + other, ok := than.(recordDigest) + if !ok { + return false + } + return bytes.Compare(r.digest, other.digest) < 0 +} + +func newRecordDigest(r index.Record) recordDigest { + d, err := multihash.Decode(r.Hash()) + if err != nil { + panic(err) + } + + return recordDigest{d.Digest, r} +} + +func newRecordFromCid(c cid.Cid, at uint64) recordDigest { + d, err := multihash.Decode(c.Hash()) + if err != nil { + panic(err) + } + + return recordDigest{d.Digest, index.Record{Cid: c, Offset: at}} +} + +func (ii *insertionIndex) insertNoReplace(key cid.Cid, n uint64) { + ii.items.InsertNoReplace(newRecordFromCid(key, n)) +} + +func (ii *insertionIndex) Get(c cid.Cid) (uint64, error) { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return 0, err + } + entry := recordDigest{digest: d.Digest} + e := ii.items.Get(entry) + if e == nil { + return 0, index.ErrNotFound + } + r, ok := e.(recordDigest) + if !ok { + return 0, errUnsupported + } + + return r.Record.Offset, nil +} + +func (ii *insertionIndex) GetAll(c cid.Cid, fn func(uint64) bool) error { + d, err := multihash.Decode(c.Hash()) + if err != nil { + return err + } + entry := recordDigest{digest: d.Digest} + + any := false + iter := func(i llrb.Item) bool { + existing := i.(recordDigest) + if !bytes.Equal(existing.digest, entry.digest) { + // We've already looked at all entries with matching digests. 
+ return false + } + any = true + return fn(existing.Record.Offset) + } + ii.items.AscendGreaterOrEqual(entry, iter) + if !any { + return index.ErrNotFound + } + return nil +} + +func (ii *insertionIndex) Marshal(w io.Writer) (uint64, error) { + l := uint64(0) + if err := binary.Write(w, binary.LittleEndian, int64(ii.items.Len())); err != nil { + return l, err + } + + l += 8 + var err error + iter := func(i llrb.Item) bool { + if err = cborg.Encode(w, i.(recordDigest).Record); err != nil { + return false + } + return true + } + ii.items.AscendGreaterOrEqual(ii.items.Min(), iter) + return l, err +} + +func (ii *insertionIndex) Unmarshal(r io.Reader) error { + var length int64 + if err := binary.Read(r, binary.LittleEndian, &length); err != nil { + return err + } + d := cborg.NewDecoder(r) + for i := int64(0); i < length; i++ { + var rec index.Record + if err := d.Decode(&rec); err != nil { + return err + } + ii.items.InsertNoReplace(newRecordDigest(rec)) + } + return nil +} + +func (ii *insertionIndex) Codec() multicodec.Code { + return insertionIndexCodec +} + +func (ii *insertionIndex) Load(rs []index.Record) error { + for _, r := range rs { + rec := newRecordDigest(r) + if rec.digest == nil { + return fmt.Errorf("invalid entry: %v", r) + } + ii.items.InsertNoReplace(rec) + } + return nil +} + +func newInsertionIndex() *insertionIndex { + return &insertionIndex{} +} + +// flatten returns a formatted index in the given codec for more efficient subsequent loading. 
+func (ii *insertionIndex) flatten(codec multicodec.Code) (index.Index, error) { + si, err := index.New(codec) + if err != nil { + return nil, err + } + rcrds := make([]index.Record, ii.items.Len()) + + idx := 0 + iter := func(i llrb.Item) bool { + rcrds[idx] = i.(recordDigest).Record + idx++ + return true + } + ii.items.AscendGreaterOrEqual(ii.items.Min(), iter) + + if err := si.Load(rcrds); err != nil { + return nil, err + } + return si, nil +} + +// note that hasExactCID is very similar to GetAll, +// but it's separate as it allows us to compare Record.Cid directly, +// whereas GetAll just provides Record.Offset. + +func (ii *insertionIndex) hasExactCID(c cid.Cid) bool { + d, err := multihash.Decode(c.Hash()) + if err != nil { + panic(err) + } + entry := recordDigest{digest: d.Digest} + + found := false + iter := func(i llrb.Item) bool { + existing := i.(recordDigest) + if !bytes.Equal(existing.digest, entry.digest) { + // We've already looked at all entries with matching digests. + return false + } + if existing.Record.Cid == c { + // We found an exact match. + found = true + return false + } + // Continue looking in ascending order. + return true + } + ii.items.AscendGreaterOrEqual(entry, iter) + return found +} + +var _ blockstore.Blockstore = (*ReadWrite)(nil) + +// ReadWrite implements a blockstore that stores blocks in CARv2 format. +// Blocks put into the blockstore can be read back once they are successfully written. +// This implementation is preferable for a write-heavy workload. +// The blocks are written immediately on Put and PutAll calls, while the index is stored in memory +// and updated incrementally. +// +// The Finalize function must be called once the putting blocks are finished. +// Upon calling Finalize header is finalized and index is written out. +// Once finalized, all read and write calls to this blockstore will result in errors. 
+type ReadWrite struct { + ronly ReadOnly + + f *os.File + dataWriter *OffsetWriteSeeker + idx *insertionIndex + header carv2.Header + + opts carv2.Options +} + +// AllowDuplicatePuts is a write option which makes a CAR blockstore not +// deduplicate blocks in Put and PutMany. The default is to deduplicate, +// which matches the current semantics of go-ipfs-blockstore v1. +// +// Note that this option only affects the blockstore, and is ignored by the root +// go-car/v2 package. +func AllowDuplicatePuts(allow bool) carv2.Option { + return func(o *carv2.Options) { + o.BlockstoreAllowDuplicatePuts = allow + } +} + +// OpenReadWrite creates a new ReadWrite at the given path with a provided set of root CIDs and options. +// +// ReadWrite.Finalize must be called once putting and reading blocks are no longer needed. +// Upon calling ReadWrite.Finalize the CARv2 header and index are written out onto the file and the +// backing file is closed. Once finalized, all read and write calls to this blockstore will result +// in errors. Note, ReadWrite.Finalize must be called on an open instance regardless of whether any +// blocks were put or not. +// +// If a file at given path does not exist, the instantiation will write car.Pragma and data payload +// header (i.e. the inner CARv1 header) onto the file before returning. +// +// When the given path already exists, the blockstore will attempt to resume from it. +// On resumption the existing data sections in file are re-indexed, allowing the caller to continue +// putting any remaining blocks without having to re-ingest blocks for which previous ReadWrite.Put +// returned successfully. +// +// Resumption only works on files that were created by a previous instance of a ReadWrite +// blockstore. This means a file created as a result of a successful call to OpenReadWrite can be +// resumed from as long as write operations such as ReadWrite.Put, ReadWrite.PutMany returned +// successfully. 
On resumption the roots argument and WithDataPadding option must match the +// previous instantiation of ReadWrite blockstore that created the file. More explicitly, the file +// resuming from must: +// 1. start with a complete CARv2 car.Pragma. +// 2. contain a complete CARv1 data header with root CIDs matching the CIDs passed to the +// constructor, starting at offset optionally padded by WithDataPadding, followed by zero or +// more complete data sections. If any corrupt data sections are present the resumption will fail. +// Note, if set previously, the blockstore must use the same WithDataPadding option as before, +// since this option is used to locate the CARv1 data payload. +// +// Note, resumption should be used with WithCidDeduplication, so that blocks that are successfully +// written into the file are not re-written. Unless, the user explicitly wants duplicate blocks. +// +// Resuming from finalized files is allowed. However, resumption will regenerate the index +// regardless by scanning every existing block in file. +func OpenReadWrite(path string, roots []cid.Cid, opts ...carv2.Option) (*ReadWrite, error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o666) // TODO: Should the user be able to configure FileMode permissions? + if err != nil { + return nil, fmt.Errorf("could not open read/write file: %w", err) + } + stat, err := f.Stat() + if err != nil { + // Note, we should not get a an os.ErrNotExist here because the flags used to open file includes os.O_CREATE + return nil, err + } + // Try and resume by default if the file size is non-zero. + resume := stat.Size() != 0 + // If construction of blockstore fails, make sure to close off the open file. + defer func() { + if err != nil { + f.Close() + } + }() + + // Instantiate block store. + // Set the header fileld before applying options since padding options may modify header. 
+ rwbs := &ReadWrite{ + f: f, + idx: newInsertionIndex(), + header: carv2.NewHeader(0), + opts: carv2.ApplyOptions(opts...), + } + rwbs.ronly.opts = rwbs.opts + + if p := rwbs.opts.DataPadding; p > 0 { + rwbs.header = rwbs.header.WithDataPadding(p) + } + if p := rwbs.opts.IndexPadding; p > 0 { + rwbs.header = rwbs.header.WithIndexPadding(p) + } + + rwbs.dataWriter = NewOffsetWriter(rwbs.f, int64(rwbs.header.DataOffset)) + v1r := NewOffsetReadSeeker(rwbs.f, int64(rwbs.header.DataOffset)) + rwbs.ronly.backing = v1r + rwbs.ronly.idx = rwbs.idx + rwbs.ronly.carv2Closer = rwbs.f + + if resume { + if err = rwbs.resumeWithRoots(roots); err != nil { + return nil, err + } + } else { + if err = rwbs.initWithRoots(roots); err != nil { + return nil, err + } + } + + return rwbs, nil +} + +func (b *ReadWrite) initWithRoots(roots []cid.Cid) error { + if _, err := b.f.WriteAt(carv2.Pragma, 0); err != nil { + return err + } + return WriteHeader(&CarHeader{Roots: roots, Version: 1}, b.dataWriter) +} + +func (b *ReadWrite) resumeWithRoots(roots []cid.Cid) error { + // On resumption it is expected that the CARv2 Pragma, and the CARv1 header is successfully written. + // Otherwise we cannot resume from the file. + // Read pragma to assert if b.f is indeed a CARv2. + version, err := carv2.ReadVersion(b.f) + if err != nil { + // The file is not a valid CAR file and cannot resume from it. + // Or the write must have failed before pragma was written. + return err + } + if version != 2 { + // The file is not a CARv2 and we cannot resume from it. + return fmt.Errorf("cannot resume on CAR file with version %v", version) + } + + // Check if file was finalized by trying to read the CARv2 header. + // We check because if finalized the CARv1 reader behaviour needs to be adjusted since + // EOF will not signify end of CARv1 payload. i.e. index is most likely present. 
+ var headerInFile carv2.Header + _, err = headerInFile.ReadFrom(NewOffsetReadSeeker(b.f, carv2.PragmaSize)) + + // If reading CARv2 header succeeded, and CARv1 offset in header is not zero then the file is + // most-likely finalized. Check padding and truncate the file to remove index. + // Otherwise, carry on reading the v1 payload at offset determined from b.header. + if err == nil && headerInFile.DataOffset != 0 { + if headerInFile.DataOffset != b.header.DataOffset { + // Assert that the padding on file matches the given WithDataPadding option. + wantPadding := headerInFile.DataOffset - carv2.PragmaSize - carv2.HeaderSize + gotPadding := b.header.DataOffset - carv2.PragmaSize - carv2.HeaderSize + return fmt.Errorf( + "cannot resume from file with mismatched CARv1 offset; "+ + "`WithDataPadding` option must match the padding on file. "+ + "Expected padding value of %v but got %v", wantPadding, gotPadding, + ) + } else if headerInFile.DataSize == 0 { + // If CARv1 size is zero, since CARv1 offset wasn't, then the CARv2 header was + // most-likely partially written. Since we write the header last in Finalize then the + // file most-likely contains the index and we cannot know where it starts, therefore + // can't resume. + return errors.New("corrupt CARv2 header; cannot resume from file") + } + } + + // Use the given CARv1 padding to instantiate the CARv1 reader on file. + v1r := NewOffsetReadSeeker(b.ronly.backing, 0) + header, err := ReadHeader(v1r) + if err != nil { + // Cannot read the CARv1 header; the file is most likely corrupt. + return fmt.Errorf("error reading car header: %w", err) + } + if !header.Matches(CarHeader{Roots: roots, Version: 1}) { + // Cannot resume if version and root does not match. + return errors.New("cannot resume on file with mismatching data header") + } + + if headerInFile.DataOffset != 0 { + // If header in file contains the size of car v1, then the index is most likely present. 
+ // Since we will need to re-generate the index, as the one in file is flattened, truncate + // the file so that the Readonly.backing has the right set of bytes to deal with. + // This effectively means resuming from a finalized file will wipe its index even if there + // are no blocks put unless the user calls finalize. + if err := b.f.Truncate(int64(headerInFile.DataOffset + headerInFile.DataSize)); err != nil { + return err + } + } + // Now that CARv2 header is present on file, clear it to avoid incorrect size and offset in + // header in case blocksotre is closed without finalization and is resumed from. + if err := b.unfinalize(); err != nil { + return fmt.Errorf("could not un-finalize: %w", err) + } + + // TODO See how we can reduce duplicate code here. + // The code here comes from car.GenerateIndex. + // Copied because we need to populate an insertindex, not a sorted index. + // Producing a sorted index via generate, then converting it to insertindex is not possible. + // Because Index interface does not expose internal records. + // This may be done as part of https://github.com/ipld/go-car/issues/95 + + offset, err := HeaderSize(header) + if err != nil { + return err + } + sectionOffset := int64(0) + if sectionOffset, err = v1r.Seek(int64(offset), io.SeekStart); err != nil { + return err + } + + for { + // Grab the length of the section. + // Note that ReadUvarint wants a ByteReader. + length, err := varint.ReadUvarint(v1r) + if err != nil { + if err == io.EOF { + break + } + return err + } + + // Null padding; by default it's an error. + if length == 0 { + if b.ronly.opts.ZeroLengthSectionAsEOF { + break + } else { + return fmt.Errorf("carv1 null padding not allowed by default; see WithZeroLegthSectionAsEOF") + } + } + + // Grab the CID. + n, c, err := cid.CidFromReader(v1r) + if err != nil { + return err + } + b.idx.insertNoReplace(c, uint64(sectionOffset)) + + // Seek to the next section by skipping the block. 
+ // The section length includes the CID, so subtract it. + if sectionOffset, err = v1r.Seek(int64(length)-int64(n), io.SeekCurrent); err != nil { + return err + } + } + // Seek to the end of last skipped block where the writer should resume writing. + _, err = b.dataWriter.Seek(sectionOffset, io.SeekStart) + return err +} + +func (b *ReadWrite) unfinalize() error { + _, err := new(carv2.Header).WriteTo(NewOffsetWriter(b.f, carv2.PragmaSize)) + return err +} + +// Put puts a given block to the underlying datastore +func (b *ReadWrite) Put(ctx context.Context, blk blocks.Block) error { + // PutMany already checks b.ronly.closed. + return b.PutMany(ctx, []blocks.Block{blk}) +} + +// PutMany puts a slice of blocks at the same time using batching +// capabilities of the underlying datastore whenever possible. +func (b *ReadWrite) PutMany(ctx context.Context, blks []blocks.Block) error { + b.ronly.mu.Lock() + defer b.ronly.mu.Unlock() + + if b.ronly.closed { + return errClosed + } + + for _, bl := range blks { + c := bl.Cid() + + // If StoreIdentityCIDs option is disabled then treat IDENTITY CIDs like IdStore. + if !b.opts.StoreIdentityCIDs { + // Check for IDENTITY CID. If IDENTITY, ignore and move to the next block. + if _, ok, err := isIdentity(c); err != nil { + return err + } else if ok { + continue + } + } + + // Check if its size is too big. + // If larger than maximum allowed size, return error. + // Note, we need to check this regardless of whether we have IDENTITY CID or not. + // Since multhihash codes other than IDENTITY can result in large digests. 
+ cSize := uint64(len(c.Bytes())) + if cSize > b.opts.MaxIndexCidSize { + return &carv2.ErrCidTooLarge{MaxSize: b.opts.MaxIndexCidSize, CurrentSize: cSize} + } + + if !b.opts.BlockstoreAllowDuplicatePuts { + if b.ronly.opts.BlockstoreUseWholeCIDs && b.idx.hasExactCID(c) { + continue // deduplicated by CID + } + if !b.ronly.opts.BlockstoreUseWholeCIDs { + _, err := b.idx.Get(c) + if err == nil { + continue // deduplicated by hash + } + } + } + + n := uint64(b.dataWriter.Position()) + if err := util.LdWrite(b.dataWriter, c.Bytes(), bl.RawData()); err != nil { + return err + } + b.idx.insertNoReplace(c, n) + } + return nil +} + +// Discard closes this blockstore without finalizing its header and index. +// After this call, the blockstore can no longer be used. +// +// Note that this call may block if any blockstore operations are currently in +// progress, including an AllKeysChan that hasn't been fully consumed or cancelled. +func (b *ReadWrite) Discard() { + // Same semantics as ReadOnly.Close, including allowing duplicate calls. + // The only difference is that our method is called Discard, + // to further clarify that we're not properly finalizing and writing a + // CARv2 file. + b.ronly.Close() +} + +// Finalize finalizes this blockstore by writing the CARv2 header, along with flattened index +// for more efficient subsequent read. +// After this call, the blockstore can no longer be used. +func (b *ReadWrite) Finalize() error { + b.ronly.mu.Lock() + defer b.ronly.mu.Unlock() + + if b.ronly.closed { + // Allow duplicate Finalize calls, just like Close. + // Still error, just like ReadOnly.Close; it should be discarded. + return fmt.Errorf("called Finalize on a closed blockstore") + } + + // TODO check if add index option is set and don't write the index then set index offset to zero. 
+ b.header = b.header.WithDataSize(uint64(b.dataWriter.Position())) + b.header.Characteristics.SetFullyIndexed(b.opts.StoreIdentityCIDs) + + // Note that we can't use b.Close here, as that tries to grab the same + // mutex we're holding here. + defer b.ronly.closeWithoutMutex() + + // TODO if index not needed don't bother flattening it. + fi, err := b.idx.flatten(b.opts.IndexCodec) + if err != nil { + return err + } + if _, err := index.WriteTo(fi, NewOffsetWriter(b.f, int64(b.header.IndexOffset))); err != nil { + return err + } + if _, err := b.header.WriteTo(NewOffsetWriter(b.f, carv2.PragmaSize)); err != nil { + return err + } + + if err := b.ronly.closeWithoutMutex(); err != nil { + return err + } + return nil +} + +func (b *ReadWrite) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.ronly.AllKeysChan(ctx) +} + +func (b *ReadWrite) Has(ctx context.Context, key cid.Cid) (bool, error) { + return b.ronly.Has(ctx, key) +} + +func (b *ReadWrite) Get(ctx context.Context, key cid.Cid) (blocks.Block, error) { + return b.ronly.Get(ctx, key) +} + +func (b *ReadWrite) GetSize(ctx context.Context, key cid.Cid) (int, error) { + return b.ronly.GetSize(ctx, key) +} + +func (b *ReadWrite) DeleteBlock(_ context.Context, _ cid.Cid) error { + return fmt.Errorf("ReadWrite blockstore does not support deleting blocks") +} + +func (b *ReadWrite) HashOnRead(enable bool) { + b.ronly.HashOnRead(enable) +} + +func (b *ReadWrite) Roots() ([]cid.Cid, error) { + return b.ronly.Roots() +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/ro_bstores.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/ro_bstores.go new file mode 100644 index 00000000000..9cf2413f34a --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/ro_bstores.go @@ -0,0 +1,60 @@ +package stores + +import ( + "io" + "sync" + + bstore "github.com/ipfs/go-ipfs-blockstore" + "golang.org/x/xerrors" +) + +// ReadOnlyBlockstores tracks open read blockstores. 
+type ReadOnlyBlockstores struct { + mu sync.RWMutex + stores map[string]bstore.Blockstore +} + +func NewReadOnlyBlockstores() *ReadOnlyBlockstores { + return &ReadOnlyBlockstores{ + stores: make(map[string]bstore.Blockstore), + } +} + +func (r *ReadOnlyBlockstores) Track(key string, bs bstore.Blockstore) (bool, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if _, ok := r.stores[key]; ok { + return false, nil + } + + r.stores[key] = bs + return true, nil +} + +func (r *ReadOnlyBlockstores) Get(key string) (bstore.Blockstore, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if bs, ok := r.stores[key]; ok { + return bs, nil + } + + return nil, xerrors.Errorf("could not get blockstore for key %s: %w", key, ErrNotFound) +} + +func (r *ReadOnlyBlockstores) Untrack(key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if bs, ok := r.stores[key]; ok { + delete(r.stores, key) + if closer, ok := bs.(io.Closer); ok { + if err := closer.Close(); err != nil { + return xerrors.Errorf("failed to close read-only blockstore: %w", err) + } + } + } + + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/ro_bstores_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/ro_bstores_test.go new file mode 100644 index 00000000000..ce53175acb0 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/ro_bstores_test.go @@ -0,0 +1,85 @@ +package stores_test + +import ( + "context" + "path/filepath" + "testing" + + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/dagstore" + + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/stores" +) + +func TestReadOnlyStoreTracker(t *testing.T) { + ctx := context.Background() + + // Create a CARv2 file from a fixture + testData := tut.NewLibp2pTestData(ctx, t) + + fpath1 := filepath.Join(tut.ThisDir(t), "../retrievalmarket/impl/fixtures/lorem.txt") + _, 
carFilePath := testData.LoadUnixFSFileToStore(t, fpath1) + + fpath2 := filepath.Join(tut.ThisDir(t), "../retrievalmarket/impl/fixtures/lorem_under_1_block.txt") + _, carFilePath2 := testData.LoadUnixFSFileToStore(t, fpath2) + + rdOnlyBS1, err := blockstore.OpenReadOnly(carFilePath, carv2.ZeroLengthSectionAsEOF(true), blockstore.UseWholeCIDs(true)) + require.NoError(t, err) + + rdOnlyBS2, err := blockstore.OpenReadOnly(carFilePath2, carv2.ZeroLengthSectionAsEOF(true), blockstore.UseWholeCIDs(true)) + require.NoError(t, err) + + len1 := getBstoreLen(ctx, t, rdOnlyBS1) + + k1 := "k1" + k2 := "k2" + tracker := stores.NewReadOnlyBlockstores() + + // Get a non-existent key + _, err = tracker.Get(k1) + require.True(t, stores.IsNotFound(err)) + + // Add a read-only blockstore + ok, err := tracker.Track(k1, rdOnlyBS1) + require.NoError(t, err) + require.True(t, ok) + + // Get the blockstore using its key + got, err := tracker.Get(k1) + require.NoError(t, err) + + // Verify the blockstore is the same + lenGot := getBstoreLen(ctx, t, got) + require.Equal(t, len1, lenGot) + + // Call GetOrOpen with a different CAR file + ok, err = tracker.Track(k2, rdOnlyBS2) + require.NoError(t, err) + require.True(t, ok) + + // Verify the blockstore is different + len2 := getBstoreLen(ctx, t, rdOnlyBS2) + require.NotEqual(t, len1, len2) + + // Untrack the second blockstore from the tracker + err = tracker.Untrack(k2) + require.NoError(t, err) + + // Verify it's been removed + _, err = tracker.Get(k2) + require.True(t, stores.IsNotFound(err)) +} + +func getBstoreLen(ctx context.Context, t *testing.T, bs dagstore.ReadBlockstore) int { + ch, err := bs.AllKeysChan(ctx) + require.NoError(t, err) + var len int + for range ch { + len++ + } + return len +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/rw_bstores.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/rw_bstores.go new file mode 100644 index 00000000000..ca4130ffc06 --- /dev/null +++ 
b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/rw_bstores.go @@ -0,0 +1,62 @@ +package stores + +import ( + "sync" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-car/v2/blockstore" + "golang.org/x/xerrors" +) + +// ReadWriteBlockstores tracks open ReadWrite CAR blockstores. +type ReadWriteBlockstores struct { + mu sync.RWMutex + stores map[string]*blockstore.ReadWrite +} + +func NewReadWriteBlockstores() *ReadWriteBlockstores { + return &ReadWriteBlockstores{ + stores: make(map[string]*blockstore.ReadWrite), + } +} + +func (r *ReadWriteBlockstores) Get(key string) (*blockstore.ReadWrite, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if bs, ok := r.stores[key]; ok { + return bs, nil + } + return nil, xerrors.Errorf("could not get blockstore for key %s: %w", key, ErrNotFound) +} + +func (r *ReadWriteBlockstores) GetOrOpen(key string, path string, rootCid cid.Cid) (*blockstore.ReadWrite, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if bs, ok := r.stores[key]; ok { + return bs, nil + } + + bs, err := blockstore.OpenReadWrite(path, []cid.Cid{rootCid}, blockstore.UseWholeCIDs(true)) + if err != nil { + return nil, xerrors.Errorf("failed to create read-write blockstore: %w", err) + } + r.stores[key] = bs + return bs, nil +} + +func (r *ReadWriteBlockstores) Untrack(key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if bs, ok := r.stores[key]; ok { + // If the blockstore has already been finalized, calling Finalize again + // will return an error. For our purposes it's simplest if Finalize is + // idempotent so we just ignore any error. 
+ _ = bs.Finalize() + } + + delete(r.stores, key) + return nil +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/stores/rw_bstores_test.go b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/rw_bstores_test.go new file mode 100644 index 00000000000..587274541ff --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/stores/rw_bstores_test.go @@ -0,0 +1,67 @@ +package stores_test + +import ( + "context" + "path/filepath" + "testing" + + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/stretchr/testify/require" + + tut "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-fil-markets/stores" +) + +func TestReadWriteStoreTracker(t *testing.T) { + ctx := context.Background() + + // Create a CARv2 file from a fixture + testData := tut.NewLibp2pTestData(ctx, t) + + fpath1 := filepath.Join(tut.ThisDir(t), "../retrievalmarket/impl/fixtures/lorem.txt") + lnk1, carFilePath1 := testData.LoadUnixFSFileToStore(t, fpath1) + rootCidLnk1, ok := lnk1.(cidlink.Link) + require.True(t, ok) + + fpath2 := filepath.Join(tut.ThisDir(t), "../retrievalmarket/impl/fixtures/lorem_under_1_block.txt") + lnk2, carFilePath2 := testData.LoadUnixFSFileToStore(t, fpath2) + rootCidLnk2, ok := lnk2.(cidlink.Link) + require.True(t, ok) + + k1 := "k1" + k2 := "k2" + tracker := stores.NewReadWriteBlockstores() + + // Get a non-existent key + _, err := tracker.Get(k1) + require.True(t, stores.IsNotFound(err)) + + // Create a blockstore by calling GetOrOpen + rdOnlyBS1, err := tracker.GetOrOpen(k1, carFilePath1, rootCidLnk1.Cid) + require.NoError(t, err) + + // Get the blockstore using its key + got, err := tracker.Get(k1) + require.NoError(t, err) + + // Verify the blockstore is the same + len1 := getBstoreLen(ctx, t, rdOnlyBS1) + lenGot := getBstoreLen(ctx, t, got) + require.Equal(t, len1, lenGot) + + // Call GetOrOpen with a different CAR file + rdOnlyBS2, err := tracker.GetOrOpen(k2, carFilePath2, rootCidLnk2.Cid) + require.NoError(t, err) + + 
// Verify the blockstore is different + len2 := getBstoreLen(ctx, t, rdOnlyBS2) + require.NotEqual(t, len1, len2) + + // Untrack the second blockstore from the tracker + err = tracker.Untrack(k2) + require.NoError(t, err) + + // Verify it's been removed + _, err = tracker.Get(k2) + require.True(t, stores.IsNotFound(err)) +} diff --git a/extern/sxx-go-fil-markets@v1.24.0-v17/tools/tools.go b/extern/sxx-go-fil-markets@v1.24.0-v17/tools/tools.go new file mode 100644 index 00000000000..43a7071f357 --- /dev/null +++ b/extern/sxx-go-fil-markets@v1.24.0-v17/tools/tools.go @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package tools + +import ( + _ "github.com/hannahhoward/cbor-gen-for" +) diff --git a/go.mod b/go.mod index d51d7b05b08..ef967a28b18 100644 --- a/go.mod +++ b/go.mod @@ -129,7 +129,7 @@ require ( github.com/prometheus/client_golang v1.12.1 github.com/raulk/clock v1.1.0 github.com/raulk/go-watchdog v1.3.0 - github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.1 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/urfave/cli/v2 v2.8.1 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba @@ -137,7 +137,7 @@ require ( github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 - go.opencensus.io v0.23.0 + go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.7.0 go.opentelemetry.io/otel/bridge/opencensus v0.25.0 go.opentelemetry.io/otel/exporters/jaeger v1.2.0 @@ -145,16 +145,18 @@ require ( go.uber.org/fx v1.15.0 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.22.0 - golang.org/x/net v0.0.0-20220812174116-3211cb980234 - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 - golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.1.12 - golang.org/x/xerrors 
v0.0.0-20220609144429-65e65417b02f + golang.org/x/net v0.5.0 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.4.0 + golang.org/x/time v0.1.0 + golang.org/x/tools v0.3.0 + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible ) +require git.sxxfuture.net/external-archive/github/moran666666/sector-counter v0.0.0-20230223190935-4427560a2c5a + require ( github.com/GeertJohan/go.incremental v1.0.0 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect @@ -167,7 +169,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bep/debounce v1.2.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cheekybits/genny v1.0.0 // indirect github.com/cilium/ebpf v0.4.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect @@ -211,7 +213,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.5.8 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect @@ -321,11 +323,11 @@ require ( go4.org v0.0.0-20200411211856-f5505b9728dd // indirect golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 // indirect - google.golang.org/grpc v1.45.0 // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/term v0.4.0 // indirect + 
golang.org/x/text v0.6.0 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/grpc v1.53.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -337,3 +339,5 @@ require ( replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi replace github.com/filecoin-project/test-vectors => ./extern/test-vectors + +replace github.com/filecoin-project/go-fil-markets => ./extern/sxx-go-fil-markets@v1.24.0-v17 diff --git a/go.sum b/go.sum index 0de078b5c52..29bbbeff872 100644 --- a/go.sum +++ b/go.sum @@ -5,6 +5,7 @@ cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgo cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,23 +16,376 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod 
h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= 
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= 
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= 
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod 
h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod 
h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod 
h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= 
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup 
v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod 
h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod 
h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= 
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= 
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod 
h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod 
h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= 
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod 
h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod 
h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -41,6 +395,8 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.sxxfuture.net/external-archive/github/moran666666/sector-counter v0.0.0-20230223190935-4427560a2c5a h1:bUYbcSpAXVY5ERoiYm/SCJyRM1wc13q2OoriiKSgLVY= +git.sxxfuture.net/external-archive/github/moran666666/sector-counter v0.0.0-20230223190935-4427560a2c5a/go.mod h1:cBz3Rt2yQTXJka6Ve/jgUw4vjFxaQn4B09GrkQom2sk= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom 
v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= @@ -148,11 +504,14 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= @@ -167,12 +526,17 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= @@ -267,10 +631,16 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/etclabscore/go-jsonschema-walk v0.0.6 h1:DrNzoKWKd8f8XB5nFGBY00IcjakRE22OTI12k+2LkyY= github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= github.com/etclabscore/go-openrpc-reflect v0.0.36 h1:kSqNB2U8RVoW4si+4fsv13NGNkRAQ5j78zTUx1qiehk= @@ -318,8 +688,6 @@ github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88Oq github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= 
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.24.0-v17 h1:YjT0usMeR6kdAo3RBfftTPe5bNIgNmBbo5YzJHF1iLk= -github.com/filecoin-project/go-fil-markets v1.24.0-v17/go.mod h1:JW/UHkHDqP4MikCIIWNY5IHvTTsdv/zNMk9jJXKzhIU= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -485,6 +853,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -503,10 +872,12 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -524,8 +895,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -534,6 +906,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -541,6 +915,14 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -548,10 +930,23 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= 
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= @@ -579,6 +974,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= @@ -627,7 +1024,9 @@ github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3 github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3F6cCkg= github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/iancoleman/strcase v0.2.0/go.mod 
h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 h1:9tcYMdi+7Rb1y0E9Del1DRHui7Ne3za5lLw6CjMJv/M= github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94/go.mod h1:GYeBD1CF7AqnKZK+UCytLcY3G+UKo0ByXX/3xfdNyqQ= @@ -939,6 +1338,7 @@ github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfo github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -1268,6 +1668,8 @@ github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tW github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-validate v0.0.13/go.mod 
h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= @@ -1516,6 +1918,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -1660,6 +2064,9 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= @@ -1673,8 +2080,9 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod 
h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1683,8 +2091,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -1777,6 
+2186,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -1802,9 +2212,11 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= @@ -1831,6 +2243,7 @@ go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3 go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= 
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1890,6 +2303,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1904,10 +2318,13 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto 
v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -1939,6 +2356,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= @@ -1951,9 +2369,13 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2005,22 +2427,36 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= -golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2028,8 +2464,27 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod 
h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2042,8 +2497,11 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2117,17 +2575,24 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2137,38 +2602,65 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2225,20 +2717,33 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -2259,6 +2764,41 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= 
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= 
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2267,6 +2807,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -2303,8 +2844,89 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= -google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= 
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto 
v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= 
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2329,10 +2951,31 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc 
v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2346,6 +2989,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/localnet.json b/localnet.json new file mode 100644 index 00000000000..d2dc64e95a7 --- /dev/null +++ b/localnet.json @@ -0,0 +1,30 @@ +{ + "NetworkVersion": 16, + "Accounts": [], + "Miners": [], + "NetworkName": "localnet-7347b85d-f807-438c-a28b-893d774d0637", + "VerifregRootKey": { + "Type": "multisig", + "Balance": "0", + "Meta": { + "Signers": [ + "t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy" + ], + "Threshold": 1, + "VestingDuration": 0, + "VestingStart": 0 + } + }, + "RemainderAccount": { + "Type": "multisig", + "Balance": "0", + "Meta": { + "Signers": [ + "t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy" + ], + "Threshold": 1, + "VestingDuration": 0, + "VestingStart": 0 + } + } +} \ No newline at end of file diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index a5022613b6b..5adb48e45b9 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -87,6 +87,70 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark return n.dealPublisher.Publish(ctx, deal.ClientDealProposal) } +// add by lin +func (n *ProviderNodeAdapter) OnDealCompleteOfSxx(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData shared.ReadSeekStarter) (*storagemarket.PackingResult, error) { + if deal.PublishCid == nil { + return nil, xerrors.Errorf("deal.PublishCid can't be nil") + } + + sdInfo := api.PieceDealInfo{ + DealID: deal.DealID, + DealProposal: &deal.Proposal, + PublishCid: deal.PublishCid, + DealSchedule: api.DealSchedule{ + StartEpoch: 
deal.ClientDealProposal.Proposal.StartEpoch, + EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, + }, + KeepUnsealed: deal.FastRetrieval, + RemoteFilepath: deal.RemoteFilepath, + Worker: deal.Worker, + } + + // Attempt to add the piece to the sector + p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) + curTime := build.Clock.Now() + for build.Clock.Since(curTime) < addPieceRetryTimeout { + // Check if there was an error because of too many sectors being sealed + if !xerrors.Is(err, pipeline.ErrTooManySectorsSealing) { + if err != nil { + log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err) + } + + // There was either a fatal error or no error. In either case + // don't retry AddPiece + break + } + + // The piece could not be added to the sector because there are too + // many sectors being sealed, back-off for a while before trying again + select { + case <-build.Clock.After(addPieceRetryWait): + // Reset the reader to the start + err = pieceData.SeekStart() + if err != nil { + return nil, xerrors.Errorf("failed to reset piece reader to start before retrying AddPiece for deal %d: %w", deal.DealID, err) + } + + // Attempt to add the piece again + p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) + case <-ctx.Done(): + return nil, xerrors.New("context expired while waiting to retry AddPiece") + } + } + + if err != nil { + return nil, xerrors.Errorf("AddPiece failed: %s", err) + } + log.Warnf("New Deal: deal %d", deal.DealID) + + return &storagemarket.PackingResult{ + SectorNumber: p, + Offset: offset, + Size: pieceSize.Padded(), + }, nil +} +// end + func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData shared.ReadSeekStarter) (*storagemarket.PackingResult, error) { if deal.PublishCid == nil { return nil, xerrors.Errorf("deal.PublishCid can't be nil") diff --git a/miner/miner.go b/miner/miner.go index 4952f95fb61..1b3c2ceb590 
100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -144,7 +144,15 @@ func (m *Miner) Start(_ context.Context) error { return fmt.Errorf("miner already started") } m.stop = make(chan struct{}) - go m.mine(context.TODO()) + //go m.mine(context.TODO()) + + // change by sxx + if _, ok := os.LookupEnv("LOTUS_WNPOST"); ok { + go m.mine(context.TODO()) + } else { + log.Warnf("This miner will be disable minning block function.") + } + // end return nil } diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 3bfd84f08a1..5a50c8b942a 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -269,11 +269,21 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt Proposal: *dealProposal, ClientSignature: *dealProposalSig, } + + // change by pan + peerid := *mi.PeerId + if params.Peerid != nil { + peerid = *params.Peerid + log.Infof("miner peerid %s/*", peerid) + } + dStream, err := network.NewFromLibp2pHost(a.Host, // params duplicated from .../node/modules/client.go // https://github.com/filecoin-project/lotus/pull/5961#discussion_r629768011 network.RetryParameters(time.Second, 5*time.Minute, 15, 5), - ).NewDealStream(ctx, *mi.PeerId) + ).NewDealStream(ctx, peerid) + // end + if err != nil { return nil, xerrors.Errorf("opening dealstream to %s/%s failed: %w", params.Miner, *mi.PeerId, err) } @@ -312,6 +322,123 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt return &resp.Response.Proposal, nil } +func (a *API) ClientStatelessDealSxx(ctx context.Context, params *api.StartDealParams) (*network.Proposal, error) { + if params.Data.TransferType != storagemarket.TTManual { + return nil, xerrors.Errorf("invalid transfer type %s for stateless storage deal", params.Data.TransferType) + } + if !params.EpochPrice.IsZero() { + return nil, xerrors.New("stateless storage deals can only be initiated with storage price of 0") + } + + walletKey, err := a.StateAccountKey(ctx, params.Wallet, 
types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed resolving params.Wallet addr (%s): %w", params.Wallet, err) + } + + exist, err := a.WalletHas(ctx, walletKey) + if err != nil { + return nil, xerrors.Errorf("failed getting addr from wallet (%s): %w", params.Wallet, err) + } + if !exist { + return nil, xerrors.Errorf("provided address doesn't exist in wallet") + } + + mi, err := a.StateMinerInfo(ctx, params.Miner, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed getting peer ID: %w", err) + } + + md, err := a.StateMinerProvingDeadline(ctx, params.Miner, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed getting miner's deadline info: %w", err) + } + + if uint64(params.Data.PieceSize.Padded()) > uint64(mi.SectorSize) { + return nil, xerrors.New("data doesn't fit in a sector") + } + + dealStart := params.DealStartEpoch + if dealStart <= 0 { // unset, or explicitly 'epoch undefined' + ts, err := a.ChainHead(ctx) + if err != nil { + return nil, xerrors.Errorf("failed getting chain height: %w", err) + } + + blocksPerHour := 60 * 60 / build.BlockDelaySecs + dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour) // TODO: Get this from storage ask + } + + // + // stateless flow from here to the end + // + + label, err := markettypes.NewLabelFromString(params.Data.Root.Encode(multibase.MustNewEncoder('u'))) + if err != nil { + return nil, xerrors.Errorf("failed to encode label: %w", err) + } + + dealProposal := &markettypes.DealProposal{ + PieceCID: *params.Data.PieceCid, + PieceSize: params.Data.PieceSize.Padded(), + Client: walletKey, + Provider: params.Miner, + Label: label, + StartEpoch: dealStart, + EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: params.ProviderCollateral, + ClientCollateral: big.Zero(), + VerifiedDeal: params.VerifiedDeal, + } + + if dealProposal.ProviderCollateral.IsZero() { + 
networkCollateral, err := a.StateDealProviderCollateralBounds(ctx, params.Data.PieceSize.Padded(), params.VerifiedDeal, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed to determine minimum provider collateral: %w", err) + } + dealProposal.ProviderCollateral = networkCollateral.Min + } + + dealProposalSerialized, err := cborutil.Dump(dealProposal) + if err != nil { + return nil, xerrors.Errorf("failed to serialize deal proposal: %w", err) + } + + dealProposalSig, err := a.WalletSign(ctx, walletKey, dealProposalSerialized) + if err != nil { + return nil, xerrors.Errorf("failed to sign proposal : %w", err) + } + + dealProposalSigned := &markettypes.ClientDealProposal{ + Proposal: *dealProposal, + ClientSignature: *dealProposalSig, + } + + // change by pan + peerid := *mi.PeerId + if params.Peerid != nil { + peerid = *params.Peerid + log.Infof("miner peerid %s/*", peerid) + } + + if err != nil { + return nil, xerrors.Errorf("opening dealstream to %s/%s failed: %w", params.Miner, *mi.PeerId, err) + } + + proposal := network.Proposal{ + FastRetrieval: true, + DealProposal: dealProposalSigned, + Piece: &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + Root: params.Data.Root, + PieceCid: params.Data.PieceCid, + PieceSize: params.Data.PieceSize, + }, + } + + return &proposal, nil +} + func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { deals, err := a.SMDealClient.ListLocalDeals(ctx) if err != nil { diff --git a/node/impl/storminer.go b/node/impl/storminer.go index c26d504105e..b6ca11443fa 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -384,6 +384,10 @@ func (sm *StorageMinerAPI) SectorsUpdate(ctx context.Context, id abi.SectorNumbe return sm.Miner.ForceSectorState(ctx, id, sealing.SectorState(state)) } +func (sm *StorageMinerAPI) SectorsUpdateOfSxx(ctx context.Context, id abi.SectorNumber, state api.SectorState, worker string) error { + return sm.Miner.ForceSectorStateOfSxx(ctx, id, 
sealing.SectorState(state), worker) +} + func (sm *StorageMinerAPI) SectorRemove(ctx context.Context, id abi.SectorNumber) error { return sm.Miner.RemoveSector(ctx, id) } @@ -1224,6 +1228,13 @@ func (sm *StorageMinerAPI) DealsSetExpectedSealDurationFunc(ctx context.Context, return sm.SetExpectedSealDurationFunc(d) } +// add by lin +func (sm *StorageMinerAPI) DealsImportDataOfSxx(ctx context.Context, deal cid.Cid, fname string, worker string) error { + + return sm.StorageProvider.ImportDataForDealOfSxx(ctx, deal, fname, worker) +} +// end + func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fname string) error { fi, err := os.Open(fname) if err != nil { @@ -1324,6 +1335,35 @@ func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredP return out, nil } +func (sm *StorageMinerAPI) CheckProve(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) { + var rg storiface.RGetter + if expensive { + rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) { + si, err := sm.Miner.SectorsStatus(ctx, id.Number, false) + if err != nil { + return cid.Undef, false, err + } + if si.CommR == nil { + return cid.Undef, false, xerrors.Errorf("commr is nil") + } + + return *si.CommR, si.ReplicaUpdateMessage != nil, nil + } + } + + bad, err := sm.StorageMgr.CheckProve(ctx, pp, sectors, update, rg) + if err != nil { + return nil, err + } + + var out = make(map[abi.SectorNumber]string) + for sid, err := range bad { + out[sid.Number] = err + } + + return out, nil +} + func (sm *StorageMinerAPI) ActorAddressConfig(ctx context.Context) (api.AddressConfig, error) { return sm.AddrSel.AddressConfig, nil } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 0d85cd168cc..bb785a9833b 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -323,7 +323,14 @@ func WindowPostScheduler(fc 
config.MinerFeeConfig, pc config.ProvingConfig) func lc.Append(fx.Hook{ OnStart: func(context.Context) error { - go fps.Run(ctx) + //go fps.Run(ctx) + // change by sxx + if _, ok := os.LookupEnv("LOTUS_WDPOST"); ok { + go fps.Run(ctx) + } else { + log.Warnf("This miner will be disable windowPoSt.") + } + // end return nil }, }) diff --git a/storage/pipeline/fsm.go b/storage/pipeline/fsm.go index 0a75d88c8ee..2b6745eb018 100644 --- a/storage/pipeline/fsm.go +++ b/storage/pipeline/fsm.go @@ -10,6 +10,9 @@ import ( "net/http" "reflect" "time" + "os" + "path/filepath" + "io/ioutil" "golang.org/x/xerrors" @@ -71,9 +74,12 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto ), Packing: planOne(on(SectorPacked{}, GetTicket)), GetTicket: planOne( - on(SectorTicket{}, PreCommit1), + on(SectorTicket{}, WaitPC), + // on(SectorTicket{}, PreCommit1), on(SectorCommitFailed{}, CommitFailed), ), + WaitPC: planOne( + ), PreCommit1: planOne( on(SectorPreCommit1{}, PreCommit2), on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), @@ -114,9 +120,12 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorRetryPreCommit{}, PreCommitting), ), WaitSeed: planOne( - on(SectorSeedReady{}, Committing), + // on(SectorSeedReady{}, Committing), + on(SectorSeedReady{}, WaitC), on(SectorChainPreCommitFailed{}, PreCommitFailed), ), + WaitC: planOne( + ), Committing: planCommitting, CommitFinalize: planOne( on(SectorFinalized{}, SubmitCommit), @@ -483,6 +492,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handlePacking, processed, nil case GetTicket: return m.handleGetTicket, processed, nil + case WaitPC: + return m.handleWaitPC, processed, nil case PreCommit1: return m.handlePreCommit1, processed, nil case PreCommit2: @@ -497,6 +508,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta return m.handlePreCommitWait, processed, nil case WaitSeed: return 
m.handleWaitSeed, processed, nil + case WaitC: + return m.handleWaitC, processed, nil case Committing: return m.handleCommitting, processed, nil case SubmitCommit: @@ -713,6 +726,26 @@ func (m *Sealing) ForceSectorState(ctx context.Context, id abi.SectorNumber, sta return m.sectors.Send(id, SectorForceState{state}) } +func (m *Sealing) ForceSectorStateOfSxx(ctx context.Context, id abi.SectorNumber, state SectorState, worker string) error { + minerpath := os.Getenv("LOTUS_MINER_PATH") + sectorspath := filepath.Join(minerpath, "./sectorsworker") + if err := os.MkdirAll(sectorspath, 0755); err != nil { + if !os.IsExist(err) { + return err + } + } + log.Errorf("change statue : %+v", state) + if state == SectorState("Committing") { + log.Errorf("change statue : %+v", state) + if err := ioutil.WriteFile(filepath.Join(sectorspath, id.String()), []byte(worker), 0666); err != nil { + return err + } + } + + m.startupWait.Wait() + return m.sectors.Send(id, SectorForceState{state}) +} + // as sector has been removed, it's no needs to care about later events, // just returns length of events as `processed` is ok. 
func finalRemoved(events []statemachine.Event, state *SectorInfo) (uint64, error) { diff --git a/storage/pipeline/input.go b/storage/pipeline/input.go index 631e84455c3..022ea5d2089 100644 --- a/storage/pipeline/input.go +++ b/storage/pipeline/input.go @@ -2,9 +2,14 @@ package sealing import ( "context" + "os" + "path/filepath" "sort" + "strings" "time" + // "io/ioutil" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -25,6 +30,8 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" "github.com/filecoin-project/lotus/storage/sealer/storiface" "github.com/filecoin-project/lotus/storage/sectorblocks" + + scClient "git.sxxfuture.net/external-archive/github/moran666666/sector-counter/client" ) func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) error { @@ -197,7 +204,10 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er return xerrors.Errorf("getting per-sector deal limit: %w", err) } + log.Errorw("zlin: handleAddPiece 1") + for i, piece := range pending { + log.Errorw("zlin: handleAddPiece 2") m.inputLk.Lock() deal, ok := m.pendingPieces[piece] m.inputLk.Unlock() @@ -205,6 +215,40 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er return xerrors.Errorf("piece %s assigned to sector %d not found", piece, sector.SectorNumber) } + minerpath := os.Getenv("LOTUS_MINER_PATH") + workerpath := filepath.Join(minerpath, "sectorsworker") + if err := os.MkdirAll(workerpath, 0755); err != nil && !os.IsExist(err) { + log.Errorf("can't mkdir dir %+v, because : %+v", workerpath, err) + } + if err := os.WriteFile(filepath.Join(workerpath, sector.SectorNumber.String()), []byte(deal.deal.Worker), 0666); err != nil { + log.Errorf("can't write file %+v, because : %+v", workerpath, err) + } + + // workerpath := filepath.Join(minerpath, "worker", deal.deal.DealProposal.PieceCID.String()) + // log.Errorw("assign wrker workerpath " + workerpath) + // _, workererr := 
os.Stat(workerpath) + // if workererr == nil { + // sectorspath := filepath.Join(minerpath, "sectors") + // _, sectorerr := os.Stat(sectorspath) + // if sectorerr != nil { + // os.Mkdir(sectorspath, 0755) + // } + // minerSector := m.minerSectorID(sector.SectorNumber) + // sectorspath = filepath.Join(sectorspath, storiface.SectorName(minerSector)) + // log.Errorw("assign wrker sectorspath " + sectorspath) + // _, sectorerr = os.Stat(sectorspath) + // if sectorerr != nil { + // buffer, fileerr := ioutil.ReadFile(workerpath) + // if fileerr == nil { + // os.WriteFile(sectorspath, buffer, 0666) + // } + // } + // err := os.Remove(workerpath) + // if err != nil { + // log.Errorw("can't delete worker path :%+v", err) + // } + // } + if len(sector.dealIDs())+(i+1) > maxDeals { // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("too many deals assigned to sector %d, dropping deal", sector.SectorNumber)) @@ -246,16 +290,46 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er }) } - ppi, err := m.sealer.AddPiece(sealer.WithPriority(ctx.Context(), DealSectorPriority), - m.minerSector(sector.SectorType, sector.SectorNumber), - pieceSizes, - deal.size, - deal.data) - if err != nil { - err = xerrors.Errorf("writing piece: %w", err) - deal.accepted(sector.SectorNumber, offset, err) - return ctx.Send(SectorAddPieceFailed{err}) + // change by lin + var ppi abi.PieceInfo + if os.Getenv("LOTUS_OF_SXX") == "1" && deal.deal.RemoteFilepath != "" { + if !strings.HasPrefix(string(deal.deal.RemoteFilepath), "/") { + ppi, err = m.sealer.AddPiece(sealer.WithPriority(ctx.Context(), DealSectorPriority), + m.minerSector(sector.SectorType, sector.SectorNumber), + pieceSizes, + deal.size, + deal.data) + if err != nil { + err = xerrors.Errorf("writing piece: %w", err) + deal.accepted(sector.SectorNumber, offset, err) + return 
ctx.Send(SectorAddPieceFailed{err}) + } + } else { + log.Errorf("zlin: remotefilepath is : %w", deal.deal.RemoteFilepath) + ppi, err = m.sealer.AddPieceOfSxx(sealer.WithPriority(ctx.Context(), DealSectorPriority), + m.minerSector(sector.SectorType, sector.SectorNumber), + pieceSizes, + deal.size, + deal.deal.RemoteFilepath) + if err != nil { + err = xerrors.Errorf("writing piece: %w", err) + deal.accepted(sector.SectorNumber, offset, err) + return ctx.Send(SectorAddPieceFailed{err}) + } + } + } else { + ppi, err = m.sealer.AddPiece(sealer.WithPriority(ctx.Context(), DealSectorPriority), + m.minerSector(sector.SectorType, sector.SectorNumber), + pieceSizes, + deal.size, + deal.data) + if err != nil { + err = xerrors.Errorf("writing piece: %w", err) + deal.accepted(sector.SectorNumber, offset, err) + return ctx.Send(SectorAddPieceFailed{err}) + } } + // end if !ppi.PieceCID.Equals(deal.deal.DealProposal.PieceCID) { err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.DealProposal.PieceCID, ppi.PieceCID) deal.accepted(sector.SectorNumber, offset, err) @@ -759,12 +833,29 @@ func (m *Sealing) maybeUpgradeSector(ctx context.Context, sp abi.RegisteredSealP // call with m.inputLk func (m *Sealing) createSector(ctx context.Context, cfg sealiface.Config, sp abi.RegisteredSealProof) (abi.SectorNumber, error) { - sid, err := m.NextSectorNumber(ctx) - if err != nil { - return 0, xerrors.Errorf("getting sector number: %w", err) + // sid, err := m.NextSectorNumber(ctx) + // if err != nil { + // return 0, xerrors.Errorf("getting sector number: %w", err) + // } + + // change by sxx + var sid abi.SectorNumber + if _, ok := os.LookupEnv("SC_TYPE"); ok { + sid0, err := scClient.NewClient().GetSectorID(context.Background(), "") + if err != nil { + return 0, xerrors.Errorf("getting sector number: %w", err) + } + sid = abi.SectorNumber(sid0) + } else { + sid0, err := m.NextSectorNumber(ctx) + if err != nil { + return 0, xerrors.Errorf("getting sector number: 
%w", err) + } + sid = sid0 } + // end - err = m.sealer.NewSector(ctx, m.minerSector(sp, sid)) + err := m.sealer.NewSector(ctx, m.minerSector(sp, sid)) if err != nil { return 0, xerrors.Errorf("initializing sector: %w", err) } diff --git a/storage/pipeline/sector_state.go b/storage/pipeline/sector_state.go index 7a56c136b52..98a25757661 100644 --- a/storage/pipeline/sector_state.go +++ b/storage/pipeline/sector_state.go @@ -65,6 +65,8 @@ var ExistSectorStateList = map[SectorState]struct{}{ FinalizeReplicaUpdateFailed: {}, AbortUpgrade: {}, ReceiveSector: {}, + WaitPC: {}, + WaitC: {}, } // cmd/lotus-miner/info.go defines CLI colors corresponding to these states @@ -153,6 +155,9 @@ const ( Removing SectorState = "Removing" RemoveFailed SectorState = "RemoveFailed" Removed SectorState = "Removed" + + WaitPC SectorState = "WaitPC" + WaitC SectorState = "WaitC" ) func toStatState(st SectorState, finEarly bool) statSectorState { diff --git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go index b40a9bf45b3..7d05028be67 100644 --- a/storage/pipeline/states_sealing.go +++ b/storage/pipeline/states_sealing.go @@ -209,6 +209,14 @@ func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) e }) } +func (m *Sealing) handleWaitPC(ctx statemachine.Context, sector SectorInfo) error { + return nil +} + +func (m *Sealing) handleWaitC(ctx statemachine.Context, sector SectorInfo) error { + return nil +} + func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error { if err := checkPieces(ctx.Context(), m.maddr, sector.SectorNumber, sector.Pieces, m.Api, false); err != nil { // Sanity check state switch err.(type) { diff --git a/storage/sealer/faults.go b/storage/sealer/faults.go index e05bbb7b857..0cc9d8e1d76 100644 --- a/storage/sealer/faults.go +++ b/storage/sealer/faults.go @@ -13,6 +13,10 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/storage/sealer/storiface" + 
+ "path/filepath" + "os" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" ) var PostCheckTimeout = 160 * time.Second @@ -20,6 +24,7 @@ var PostCheckTimeout = 160 * time.Second // FaultTracker TODO: Track things more actively type FaultTracker interface { CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) + CheckProve(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) } // CheckProvable returns unprovable sectors @@ -131,3 +136,162 @@ func (m *Manager) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, } var _ FaultTracker = &Manager{} + +//////////////////////////////////////// + +// CheckProve returns unprovable sectors +func (m *Manager) CheckProve(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, update []bool, rg storiface.RGetter) (map[abi.SectorID]string, error) { + var bad = make(map[abi.SectorID]string) + + ssize, err := pp.SectorSize() + if err != nil { + return nil, err + } + + // TODO: More better checks + for i, sector := range sectors { + err := func() error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var fReplica string + var fCache string + + if update[i] { + lockedUpdate, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone) + if err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + if !lockedUpdate { + log.Warnw("CheckProvable Sector FAULT: can't acquire read lock on update replica", "sector", sector) + bad[sector.ID] = fmt.Sprint("can't acquire read lock") + return nil + } + lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: acquire 
sector update replica in checkProvable", "sector", sector, "error", err) + bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) + return nil + } + fReplica, fCache = lp.Update, lp.UpdateCache + } else { + locked, err := m.index.StorageTryLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTNone) + if err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + if !locked { + log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector) + bad[sector.ID] = fmt.Sprint("can't acquire read lock") + return nil + } + + lp, _, err := m.localStore.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err) + bad[sector.ID] = fmt.Sprintf("acquire sector failed: %s", err) + return nil + } + fReplica, fCache = lp.Sealed, lp.Cache + } + if fReplica == "" || fCache == "" { + log.Warnw("CheckProvable Sector FAULT: cache and/or sealed paths not found", "sector", sector, "sealed", fReplica, "cache", fCache) + bad[sector.ID] = fmt.Sprintf("cache and/or sealed paths not found, cache %q, sealed %q", fCache, fReplica) + return nil + } + + toCheck := map[string]int64{ + fReplica: 1, + filepath.Join(fCache, "p_aux"): 0, + } + + addCachePathsForSectorSize(toCheck, fCache, ssize) + + for p, sz := range toCheck { + st, err := os.Stat(p) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "err", err) + bad[sector.ID] = fmt.Sprintf("%s", err) + return nil + } + + if sz != 0 { + if st.Size() != int64(ssize)*sz { + log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", fReplica, "cache", fCache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz) + bad[sector.ID] = fmt.Sprintf("%s is wrong size 
(got %d, expect %d)", p, st.Size(), int64(ssize)*sz) + return nil + } + } + } + + if rg != nil { + wpp, err := sector.ProofType.RegisteredWindowPoStProof() + if err != nil { + return err + } + + var pr abi.PoStRandomness = make([]byte, abi.RandomnessLength) + _, _ = rand.Read(pr) + pr[31] &= 0x3f + + ch, err := ffi.GeneratePoStFallbackSectorChallenges(wpp, sector.ID.Miner, pr, []abi.SectorNumber{ + sector.ID.Number, + }) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err) + bad[sector.ID] = fmt.Sprintf("generating fallback challenges: %s", err) + return nil + } + + commr, _, err := rg(ctx, sector.ID) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err) + bad[sector.ID] = fmt.Sprintf("getting commR: %s", err) + return nil + } + _, err = ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{ + SectorInfo: proof.SectorInfo{ + SealProof: sector.ProofType, + SectorNumber: sector.ID.Number, + SealedCID: commr, + }, + CacheDirPath: fCache, + PoStProofType: wpp, + SealedSectorPath: fReplica, + }, ch.Challenges[sector.ID.Number]) + if err != nil { + log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "sealed", fReplica, "cache", fCache, "err", err) + bad[sector.ID] = fmt.Sprintf("generating vanilla proof: %s", err) + return nil + } + } + + return nil + }() + if err != nil { + return nil, err + } + } + + return bad, nil +} + +func addCachePathsForSectorSize(chk map[string]int64, cacheDir string, ssize abi.SectorSize) { + switch ssize { + case 2 << 10: + fallthrough + case 8 << 20: + fallthrough + case 512 << 20: + chk[filepath.Join(cacheDir, "sc-02-data-tree-r-last.dat")] = 0 + case 32 << 30: + for i := 0; i < 8; i++ { + chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0 + } + case 64 << 30: + for i := 0; i < 16; i++ { + 
chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0 + } + default: + log.Warnf("not checking cache files of %s sectors for faults", ssize) + } +} \ No newline at end of file diff --git a/storage/sealer/ffiwrapper/sealer_cgo.go b/storage/sealer/ffiwrapper/sealer_cgo.go index 67d519259b1..91e0d94c7e2 100644 --- a/storage/sealer/ffiwrapper/sealer_cgo.go +++ b/storage/sealer/ffiwrapper/sealer_cgo.go @@ -36,10 +36,21 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/fr32" "github.com/filecoin-project/lotus/storage/sealer/partialfile" "github.com/filecoin-project/lotus/storage/sealer/storiface" + // add by lin + "github.com/filecoin-project/go-fil-markets/shared" + // end + + "strings" ) var _ storiface.Storage = &Sealer{} +// add by lin +type CarPath struct { + Path []string +} +// end + func New(sectors SectorProvider) (*Sealer, error) { sb := &Sealer{ sectors: sectors, @@ -188,7 +199,277 @@ func (sb *Sealer) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, }, nil } +// add by lin +func (sb *Sealer) AddPieceOfSxx(ctx context.Context, sector storiface.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, path string) (abi.PieceInfo, error) { + // add by lin + isRealData := true + sectortype := os.Getenv("LOTUS_SECTOR_TYPE_SXX") + if sectortype == "1" { + isRealData = false + } + if !isRealData { + if mypieceInfo, err := sb.myAddPiece(ctx, sector, pieceSize); err == nil { + return mypieceInfo, nil + } else { + log.Warn(err) + } + } + // end + + // TODO: allow tuning those: + chunk := abi.PaddedPieceSize(4 << 20) + parallel := runtime.NumCPU() + + var offset abi.UnpaddedPieceSize + for _, size := range existingPieceSizes { + offset += size + } + + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return abi.PieceInfo{}, err + } + + maxPieceSize := abi.PaddedPieceSize(ssize) + + if offset.Padded()+pieceSize.Padded() > maxPieceSize { + return abi.PieceInfo{}, 
xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset) + } + + var done func() + var stagedFile *partialfile.PartialFile + + defer func() { + if done != nil { + done() + } + + if stagedFile != nil { + if err := stagedFile.Close(); err != nil { + log.Errorf("closing staged file: %+v", err) + } + } + }() + + var stagedPath storiface.SectorPaths + if len(existingPieceSizes) == 0 { + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed, storiface.PathSealing) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) + } + + stagedFile, err = partialfile.CreatePartialFile(maxPieceSize, stagedPath.Unsealed) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) + } + } else { + stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, 0, storiface.PathSealing) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) + } + + stagedFile, err = partialfile.OpenPartialFile(maxPieceSize, stagedPath.Unsealed) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("opening unsealed sector file: %w", err) + } + } + + w, err := stagedFile.Writer(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err) + } + + pw := fr32.NewPadWriter(w) + + // 从path读取car数据 + worker_car_json_file := filepath.Join(os.Getenv("LOTUS_WORKER_PATH"), "./car_path.json") + _, err = os.Stat(worker_car_json_file) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("don't have json file of car path") + } + byteValue, err := ioutil.ReadFile(worker_car_json_file) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("can't read %+v, err: %+v", worker_car_json_file, err) + } + var car_path CarPath + json.Unmarshal(byteValue, &car_path) + worker_car_path 
:= "" + for _, v := range car_path.Path { + worker_car_path = "" + dir := path + cur := "" + for { + dir, cur = filepath.Split(dir) + dir = filepath.Dir(dir) + if filepath.Base(dir) == "/" { + worker_car_path = filepath.Join(v, worker_car_path) + break + } else { + worker_car_path = filepath.Join(cur, worker_car_path) + } + } + _, err = os.Stat(worker_car_path) + if err == nil { + break + } + } + log.Errorf("zlin: AddPieceOfSxx file name: %+v", worker_car_path) + file, err := os.Open(worker_car_path) + defer file.Close() + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("can't add piece to sector with get car fail: %w", err) + } + filestat, _ := file.Stat() + pieceData, err := shared.NewInflatorReader(file, uint64(filestat.Size()), pieceSize) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("can't add piece to sector with read car fail: %w", err) + } + // 读取结束 + + pr := io.TeeReader(io.LimitReader(pieceData, int64(pieceSize)), pw) + + throttle := make(chan []byte, parallel) + piecePromises := make([]func() (abi.PieceInfo, error), 0) + + buf := make([]byte, chunk.Unpadded()) + for i := 0; i < parallel; i++ { + if abi.UnpaddedPieceSize(i)*chunk.Unpadded() >= pieceSize { + break // won't use this many buffers + } + throttle <- make([]byte, chunk.Unpadded()) + } + + for { + var read int + for rbuf := buf; len(rbuf) > 0; { + n, err := pr.Read(rbuf) + if err != nil && err != io.EOF { + return abi.PieceInfo{}, xerrors.Errorf("pr read error: %w", err) + } + + rbuf = rbuf[n:] + read += n + + if err == io.EOF { + break + } + } + if read == 0 { + break + } + + done := make(chan struct { + cid.Cid + error + }, 1) + pbuf := <-throttle + copy(pbuf, buf[:read]) + + go func(read int) { + defer func() { + throttle <- pbuf + }() + + c, err := sb.pieceCid(sector.ProofType, pbuf[:read]) + done <- struct { + cid.Cid + error + }{c, err} + }(read) + + piecePromises = append(piecePromises, func() (abi.PieceInfo, error) { + select { + case e := <-done: + if e.error != 
nil { + return abi.PieceInfo{}, e.error + } + + return abi.PieceInfo{ + Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(), + PieceCID: e.Cid, + }, nil + case <-ctx.Done(): + return abi.PieceInfo{}, ctx.Err() + } + }) + } + + if err := pw.Close(); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("closing padded writer: %w", err) + } + + if err := stagedFile.MarkAllocated(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("marking data range as allocated: %w", err) + } + + if err := stagedFile.Close(); err != nil { + return abi.PieceInfo{}, err + } + stagedFile = nil + + if len(piecePromises) == 1 { + return piecePromises[0]() + } + + var payloadRoundedBytes abi.PaddedPieceSize + pieceCids := make([]abi.PieceInfo, len(piecePromises)) + for i, promise := range piecePromises { + pinfo, err := promise() + if err != nil { + return abi.PieceInfo{}, err + } + + pieceCids[i] = pinfo + payloadRoundedBytes += pinfo.Size + } + + pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err) + } + + // validate that the pieceCID was properly formed + if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil { + return abi.PieceInfo{}, err + } + + if payloadRoundedBytes < pieceSize.Padded() { + paddedCid, err := commpffi.ZeroPadPieceCommitment(pieceCID, payloadRoundedBytes.Unpadded(), pieceSize) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("failed to pad data: %w", err) + } + + pieceCID = paddedCid + } + + // add by xiao + if !isRealData { + sb.createTemplateFile(stagedPath.Unsealed, pieceSize, pieceCID) + } + //end + + return abi.PieceInfo{ + Size: pieceSize.Padded(), + PieceCID: pieceCID, + }, nil +} +// end + func (sb *Sealer) AddPiece(ctx context.Context, sector storiface.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, pieceData 
storiface.Data) (abi.PieceInfo, error) { + // add by lin + isRealData := true + sectortype := os.Getenv("LOTUS_SECTOR_TYPE_SXX") + if sectortype == "1" { + isRealData = false + } + if !isRealData { + if mypieceInfo, err := sb.myAddPiece(ctx, sector, pieceSize); err == nil { + return mypieceInfo, nil + } else { + log.Warn(err) + } + } + // end origPieceData := pieceData defer func() { closer, ok := origPieceData.(io.Closer) @@ -382,6 +663,12 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storiface.SectorRef, exis pieceCID = paddedCid } + // add by xiao + if !isRealData { + sb.createTemplateFile(stagedPath.Unsealed, pieceSize, pieceCID) + } + //end + return abi.PieceInfo{ Size: pieceSize.Padded(), PieceCID: pieceCID, @@ -1298,3 +1585,91 @@ func (sb *Sealer) GenerateWindowPoStWithVanilla(ctx context.Context, proofType a ProofBytes: pp.ProofBytes, }, nil } + +// add by xiao +func (sb *Sealer) myAddPiece(ctx context.Context, sector storiface.SectorRef, pieceSize abi.UnpaddedPieceSize) (abi.PieceInfo, error) { + var done func() + var pieceInfo abi.PieceInfo + + defer func() { + if done != nil { + done() + } + }() + + stagedPath, done, err := sb.sectors.AcquireSector(ctx, sector, 0, storiface.FTUnsealed, storiface.PathSealing) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) + } + + n := strings.Index(stagedPath.Unsealed, "unsealed") + if n == -1 { + return abi.PieceInfo{}, xerrors.Errorf("unsealed not exsit") + } + apTemplatePath := string([]byte(stagedPath.Unsealed)[:n]) + + if _, err := os.Stat(apTemplatePath + "piece-info.json"); os.IsNotExist(err) || err != nil { // 判断piece-info.json文件是否存在 + return abi.PieceInfo{}, xerrors.Errorf("piece-info.json not exsite: %w", err) + } + + if _, err := os.Stat(apTemplatePath + "s-template"); os.IsNotExist(err) || err != nil { // 判断s-template文件是否存在 + return abi.PieceInfo{}, xerrors.Errorf("s-template not exsite: %w", err) + } + + configFile, err := os.Open(apTemplatePath + 
"piece-info.json") + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("open piece-info.json failed: %w", err) + } + defer configFile.Close() + + jsonParser := json.NewDecoder(configFile) + if err := jsonParser.Decode(&pieceInfo); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("decode piece-info.json failed: %w", err) + } + + if pieceInfo.Size != pieceSize.Padded() { + return abi.PieceInfo{}, xerrors.Errorf("pieceInfo.Size not the same") + } + + if err = os.Link(apTemplatePath+"s-template", stagedPath.Unsealed); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("Sector %d unsealed do not create: %w", err) + } + + return pieceInfo, nil +} + +func (sb *Sealer) createTemplateFile(unsealedFile string, pieceSize abi.UnpaddedPieceSize, pieceCID cid.Cid) { + n := strings.Index(unsealedFile, "unsealed") + if n == -1 { + log.Warn("unsealed not exsit") + } else { + apTemplatePath := string([]byte(unsealedFile)[:n]) + + myPieceInfo := &abi.PieceInfo{ + Size: pieceSize.Padded(), + PieceCID: pieceCID, + } + + fd, err := os.Create(apTemplatePath + "piece-info.json") + if err != nil { + log.Warn("create piece-info.json failed: ", err) + } + defer fd.Close() + + data, err := json.MarshalIndent(myPieceInfo, "", " ") //data类型是[]byte + if err != nil { + log.Warn("marshalIndent myPieceInfo failed: ", err) + } + + _, err = fd.Write(data) + if err != nil { + log.Warn("write piece-info.json failed : ", err) + } + + if err = os.Link(unsealedFile, apTemplatePath+"s-template"); err != nil { + log.Warn("create s-template hard link failed: ", err) + } + } +} + +// end \ No newline at end of file diff --git a/storage/sealer/manager.go b/storage/sealer/manager.go index b0c023b09fb..f617d3534c0 100644 --- a/storage/sealer/manager.go +++ b/storage/sealer/manager.go @@ -24,6 +24,11 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/sealtasks" "github.com/filecoin-project/lotus/storage/sealer/storiface" + + 
"bytes" + "encoding/json" + "io/ioutil" + "os" ) var log = logging.Logger("advmgr") @@ -426,6 +431,40 @@ func (m *Manager) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, return out, err } +// add by lin +func (m *Manager) AddPieceOfSxx(ctx context.Context, sector storiface.SectorRef, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, carpath string) (abi.PieceInfo, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUnsealed); err != nil { + return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err) + } + + var selector WorkerSelector + var err error + if len(existingPieces) == 0 { // new + selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) + } else { // use existing + selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) + } + + var out abi.PieceInfo + err = m.sched.Schedule(ctx, sector, sealtasks.TTAddPiece, selector, schedNop, func(ctx context.Context, w Worker) error { + p, err := m.waitSimpleCall(ctx)(w.AddPieceOfSxx(ctx, sector, existingPieces, sz, carpath)) + if err != nil { + return err + } + if p != nil { + out = p.(abi.PieceInfo) + } + return nil + }) + + return out, err +} + +// end + func (m *Manager) AddPiece(ctx context.Context, sector storiface.SectorRef, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -539,7 +578,9 @@ func (m *Manager) SealPreCommit2(ctx context.Context, sector storiface.SectorRef return storiface.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err) } - selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, true) + // change by pan + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false) + //selector := newExistingSelector(m.index, sector.ID, 
storiface.FTCache|storiface.FTSealed, true) err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, m.schedFetch(sector, storiface.FTCache|storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { err := m.startWork(ctx, w, wk)(w.SealPreCommit2(ctx, sector, phase1Out)) @@ -705,7 +746,17 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storiface.SectorRef } // get a selector for moving stuff into long-term storage - fetchSel := newMoveSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, storiface.PathStorage, !m.disallowRemoteFinalize) + // fetchSel := newMoveSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, storiface.PathStorage, !m.disallowRemoteFinalize) + + // change by pan + moveByWorker := m.MoveByWorker(ctx, sector) + var fetchSel WorkerSelector + if moveByWorker { + fetchSel = newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false) + } else { + fetchSel = newMoveSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, storiface.PathStorage, !m.disallowRemoteFinalize) + } + // end // only move the unsealed file if it still exists and needs moving moveUnsealed := unsealed @@ -726,6 +777,11 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storiface.SectorRef return xerrors.Errorf("moving sector to storage: %w", err) } + // add by pan + m.declareSector(ctx, sector, storiface.FTSealed) + m.declareSector(ctx, sector, storiface.FTCache) + // end + return nil } @@ -812,6 +868,11 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storiface.Se return xerrors.Errorf("moving sector to storage: %w", err) } + // add by pan + m.declareSector(ctx, sector, storiface.FTUpdate) + m.declareSector(ctx, sector, storiface.FTUpdateCache) + // end + return nil } @@ -1314,5 +1375,103 @@ func (m *Manager) Close(ctx context.Context) error { return m.sched.Close(ctx) } +// add by pan +func (m *Manager) 
MoveByWorker(ctx context.Context, sector storiface.SectorRef) bool { + seal, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTSealed, 0, false) + if err != nil { + return false + } + if len(seal) != 1 { + return false + } + sl, err := m.index.StorageList(ctx) + if err != nil { + return false + } + for id, _ := range sl { + store, err := m.index.StorageInfo(ctx, id) + if err != nil { + continue + } + if !store.CanStore { + continue + } + for _, value := range store.URLs { + for _, url := range seal[0].BaseURLs { + if value == url { + log.Info("SectorId(" + sector.ID.Number.String() + ") move storage by worker") + return true + } + } + } + } + log.Info("SectorId(" + sector.ID.Number.String() + ") move storage by miner") + return false +} + +func (m *Manager) declareSector(ctx context.Context, sector storiface.SectorRef, sectorFileType storiface.SectorFileType) { + + url := os.Getenv("DECLARE_API_URL") + token := os.Getenv("DECLARE_API_TOKEN") + storageID := os.Getenv("DECLARE_STORAGE_ID") + if url != "" && token != "" && storageID != "" { + m.StorageDeclareSector(ctx, sector, sectorFileType, url, token, storageID) + } + + url = os.Getenv("WINNER_DECLARE_API_URL") + token = os.Getenv("WINNER_DECLARE_API_TOKEN") + storageID = os.Getenv("WINNER_DECLARE_STORAGE_ID") + + if url != "" && token != "" && storageID != "" { + m.StorageDeclareSector(ctx, sector, sectorFileType, url, token, storageID) + } + +} + +func (m *Manager) StorageDeclareSector(ctx context.Context, sector storiface.SectorRef, sectorFileType storiface.SectorFileType, url string, token string, storageID string) { + + parameters := make(map[string]interface{}) + parameters["jsonrpc"] = "2.0" + parameters["method"] = "Filecoin.StorageDeclareSector" + parameters["params"] = []interface{}{ + storageID, + map[string]interface{}{ + "Miner": sector.ID.Miner, + "Number": sector.ID.Number, + }, + sectorFileType, + true, + } + parameters["id"] = 1 + data, err := json.Marshal(parameters) + if err != nil { 
+ return + } + bearer := "Bearer " + token + + body := bytes.NewReader(data) + + req, err := http.NewRequest("POST", url, body) + if err != nil { + return + } + req.Header.Add("Authorization", bearer) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + buffer, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + log.Info("SectorId(" + sector.ID.Number.String() + ") declare " + string(buffer)) +} + +// end + var _ Unsealer = &Manager{} var _ SectorManager = &Manager{} diff --git a/storage/sealer/mock/mock.go b/storage/sealer/mock/mock.go index 0797bf549df..cfdb99cd290 100644 --- a/storage/sealer/mock/mock.go +++ b/storage/sealer/mock/mock.go @@ -22,6 +22,12 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" "github.com/filecoin-project/lotus/storage/sealer/storiface" + + "os" + "path/filepath" + "encoding/json" + + "github.com/filecoin-project/go-fil-markets/shared" ) var log = logging.Logger("sbmock") @@ -35,6 +41,12 @@ type SectorMgr struct { lk sync.Mutex } +// add by lin +type CarPath struct { + Path []string +} +// end + type mockVerifProver struct { aggregates map[string]prooftypes.AggregateSealVerifyProofAndInfos // used for logging bad verifies } @@ -119,6 +131,86 @@ func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storiface.SectorRef }, nil } +func (mgr *SectorMgr) AddPieceOfSxx(ctx context.Context, sectorID storiface.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, path string) (abi.PieceInfo, error) { + log.Warn("Add piece: ", sectorID, size, sectorID.ProofType) + + // file, err := filestore.FileStore.Open(filepath.Join(curpath, carfile)) + worker_car_json_file := filepath.Join(os.Getenv("LOTUS_WORKER_PATH"), "./car_path.json") + _, err := os.Stat(worker_car_json_file) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("don't have json file of car path") 
+ }
+ byteValue, err := ioutil.ReadFile(worker_car_json_file)
+ if err != nil {
+ return abi.PieceInfo{}, xerrors.Errorf("can't read %+v, err: %+v", worker_car_json_file, err)
+ }
+ var car_path CarPath
+ json.Unmarshal(byteValue, &car_path)
+ worker_car_path := ""
+ for _, v := range car_path.Path {
+ worker_car_path = ""
+ dir := path
+ cur := ""
+ for {
+ dir, cur = filepath.Split(dir)
+ dir = filepath.Dir(dir)
+ if filepath.Base(dir) == "/" {
+ worker_car_path = filepath.Join(v, worker_car_path)
+ break
+ } else {
+ worker_car_path = filepath.Join(cur, worker_car_path)
+ }
+ }
+ _, err = os.Stat(worker_car_path)
+ if err == nil {
+ break
+ }
+ }
+ log.Errorf("zlin: AddPieceOfSxx file name: %+v", worker_car_path)
+ file, err := os.Open(worker_car_path)
+ if err != nil {
+ return abi.PieceInfo{}, xerrors.Errorf("can't add piece to sector with get car fail: %w", err)
+ }
+ defer file.Close()
+ filestat, _ := file.Stat()
+ r, err := shared.NewInflatorReader(file, uint64(filestat.Size()), size)
+ if err != nil {
+ return abi.PieceInfo{}, xerrors.Errorf("can't add piece to sector with read car fail: %w", err)
+ }
+
+ var b bytes.Buffer
+ tr := io.TeeReader(r, &b)
+
+ c, err := commpffi.GeneratePieceCIDFromFile(sectorID.ProofType, tr, size)
+ if err != nil {
+ return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err)
+ }
+
+ log.Warn("Generated Piece CID: ", c)
+
+ mgr.lk.Lock()
+ mgr.pieces[c] = b.Bytes()
+
+ ss, ok := mgr.sectors[sectorID.ID]
+ if !ok {
+ ss = &sectorState{
+ state: statePacking,
+ }
+ mgr.sectors[sectorID.ID] = ss
+ }
+ mgr.lk.Unlock()
+
+ ss.lk.Lock()
+ ss.pieces = append(ss.pieces, c)
+ ss.lk.Unlock()
+
+ return abi.PieceInfo{
+
+ Size: size.Padded(),
+ PieceCID: c,
+ }, nil
+}
+
 func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
 mgr.lk.Lock()
 defer mgr.lk.Unlock()
diff --git a/storage/sealer/sched.go b/storage/sealer/sched.go
index 335bb124929..ace2df12413 100644
--- a/storage/sealer/sched.go
+++ 
b/storage/sealer/sched.go @@ -2,6 +2,11 @@ package sealer import ( "context" + // "fmt" + // "io/ioutil" + "os" + "path/filepath" + // "strconv" "sync" "time" @@ -446,3 +451,167 @@ func (sh *Scheduler) Close(ctx context.Context) error { } return nil } + +// add by pan +// func (sh *Scheduler) findWorker(task *WorkerRequest) int { +// i := -1 +// if task.TaskType == sealtasks.TTPreCommit2 { +// i = sh.findStorageWorker(task) +// } else { +// // if task.TaskType == sealtasks.TTAddPiece { +// // sh.assignWorker(task) +// // } +// i = sh.findFileWorker(task) +// } +// return i +// } +// func (sh *Scheduler) assignWorker(task *WorkerRequest) error { +// minerpath := os.Getenv("LOTUS_MINER_PATH") +// sectorspath := filepath.Join(minerpath, "./sectors") + +// _, err := os.Stat(sectorspath + "/" + storiface.SectorName(task.Sector.ID)) +// if err == nil { +// return nil +// } + +// workerpath := filepath.Join(minerpath, "./worker") + +// files, err := ioutil.ReadDir(workerpath) +// if err != nil { +// return err +// } +// tmp := map[int64]string{} +// for _, file := range files { + +// buffer, err := ioutil.ReadFile(workerpath + "/" + file.Name()) +// if err != nil { +// continue +// } +// value := string(buffer) + +// key, err := strconv.ParseInt(file.Name(), 10, 64) + +// tmp[key] = value +// } + +// var workename string +// var t int64 +// t = -1 +// for key, value := range tmp { +// if t == -1 || key < t { +// workename = value +// t = key +// } +// } +// if t > -1 { +// file := fmt.Sprintf("%s/%d", workerpath, t) +// os.Remove(file) +// _, err := os.Stat(sectorspath) +// if err != nil { +// err = os.Mkdir(sectorspath, 0755) +// } +// path := sectorspath + "/" + storiface.SectorName(task.Sector.ID) +// err = os.WriteFile(path, []byte(workename), 0666) +// } +// return nil +// } +// func (sh *Scheduler) findStorageWorker(task *WorkerRequest) int { +// ctx := task.Ctx +// sel, ok := task.Sel.(*existingSelector) +// if !ok { +// return -1 +// } +// ssize, err := 
task.Sector.ProofType.SectorSize() +// if err != nil { +// return -1 +// } +// best, err := sel.index.StorageFindSector(ctx, task.Sector.ID, sel.fileType, ssize, false) +// if err != nil { +// return -1 +// } +// if len(best) == 0 { +// return -1 +// } +// for wnd1, windowRequest := range sh.OpenWindows { +// worker, ok := sh.Workers[windowRequest.Worker] +// if !ok { +// continue +// } +// tasks, err := worker.workerRpc.TaskTypes(ctx) +// if err != nil { +// continue +// } +// if _, supported := tasks[task.TaskType]; !supported { +// continue +// } +// paths, err := worker.workerRpc.Paths(ctx) +// if err != nil { +// continue +// } +// for _, path := range paths { +// for l := 0; l < len(best); l++ { +// info := best[l] +// if info.Weight != 0 && path.ID == info.ID { +// return wnd1 +// } +// } +// } +// } +// return -1 +// } + +// func (sh *Scheduler) findFileWorker(task *WorkerRequest) int { +// minerpath := os.Getenv("LOTUS_MINER_PATH") +// path := filepath.Join(minerpath, "./sectors/", storiface.SectorName(task.Sector.ID)) +// _, err := os.Stat(path) +// if os.IsNotExist(err) { +// return -1 +// } +// data, err := os.ReadFile(path) +// if err != nil { +// return -1 +// } +// workerid := string(data) +// for wnd1, windowRequest := range sh.OpenWindows { +// worker, ok := sh.Workers[windowRequest.Worker] +// if !ok { +// continue +// } +// if workerid == worker.Info.Hostname { +// return wnd1 +// } +// } + +// return -1 +// } + +// end + +// add by lin +func (sh *Scheduler) findWorker(task *WorkerRequest) int { + minerpath := os.Getenv("LOTUS_MINER_PATH") + sectorspath := filepath.Join(minerpath, "./sectorsworker") + path := filepath.Join(sectorspath, task.Sector.ID.Number.String()) + _, err := os.Stat(path) + if os.IsNotExist(err) { + log.Errorf("zlin IsNotExist file err: %+v", err) + return -1 + } + data, err := os.ReadFile(path) + if err != nil { + log.Errorf("zlin read file err: %+v", err) + return -1 + } + workerid := string(data) + for wnd1, windowRequest 
:= range sh.OpenWindows { + worker, ok := sh.Workers[windowRequest.Worker] + if !ok { + continue + } + if workerid == worker.Info.Hostname { + return wnd1 + } + } + return -1 +} +// end \ No newline at end of file diff --git a/storage/sealer/sched_assigner_common.go b/storage/sealer/sched_assigner_common.go index 09ff82a8921..83d5e128b96 100644 --- a/storage/sealer/sched_assigner_common.go +++ b/storage/sealer/sched_assigner_common.go @@ -5,6 +5,8 @@ import ( "math/rand" "sort" "sync" + + "github.com/filecoin-project/lotus/storage/sealer/sealtasks" ) type WindowSelector func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int @@ -69,6 +71,28 @@ func (a *AssignerCommon) TrySched(sh *Scheduler) { var havePreferred bool for wnd, windowRequest := range sh.OpenWindows { + + // add by pan/lin + var skip = false + + // if task.TaskType == sealtasks.TTAddPiece || task.TaskType == sealtasks.TTPreCommit1 || task.TaskType == sealtasks.TTPreCommit2 || task.TaskType == sealtasks.TTReplicaUpdate { + // i := sh.findWorker(task) + // if i > -1 { + // wnd = i + // windowRequest = sh.OpenWindows[i] + // skip = true + // } + // } + if !(task.TaskType == sealtasks.TTCommit1) { + i := sh.findWorker(task) + if i > -1 { + wnd = i + windowRequest = sh.OpenWindows[i] + skip = true + } + } + // end + worker, ok := sh.Workers[windowRequest.Worker] if !ok { log.Errorf("worker referenced by windowRequest not found (worker: %s)", windowRequest.Worker) @@ -112,6 +136,12 @@ func (a *AssignerCommon) TrySched(sh *Scheduler) { } acceptableWindows[sqi] = append(acceptableWindows[sqi], wnd) + + // add by pan + if skip == true { + break + } + // end } if len(acceptableWindows[sqi]) == 0 { diff --git a/storage/sealer/storiface/storage.go b/storage/sealer/storiface/storage.go index 6d6063c54e2..56139f3bbe9 100644 --- a/storage/sealer/storiface/storage.go +++ b/storage/sealer/storiface/storage.go @@ -56,6 +56,7 @@ type Sealer interface { NewSector(ctx context.Context, sector 
SectorRef) error DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData Data) (abi.PieceInfo, error) AddPiece(ctx context.Context, sector SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData Data) (abi.PieceInfo, error) + AddPieceOfSxx(ctx context.Context, sector SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, path string) (abi.PieceInfo, error) SealPreCommit1(ctx context.Context, sector SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (PreCommit1Out, error) SealPreCommit2(ctx context.Context, sector SectorRef, pc1o PreCommit1Out) (SectorCids, error) diff --git a/storage/sealer/storiface/worker.go b/storage/sealer/storiface/worker.go index 51a7901b0bc..7ddfaa12507 100644 --- a/storage/sealer/storiface/worker.go +++ b/storage/sealer/storiface/worker.go @@ -120,6 +120,7 @@ type WorkerCalls interface { // async DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData Data) (CallID, error) AddPiece(ctx context.Context, sector SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData Data) (CallID, error) + AddPieceOfSxx(ctx context.Context, sector SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, path string) (CallID, error) SealPreCommit1(ctx context.Context, sector SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error) SealPreCommit2(ctx context.Context, sector SectorRef, pc1o PreCommit1Out) (CallID, error) SealCommit1(ctx context.Context, sector SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids SectorCids) (CallID, error) diff --git a/storage/sealer/worker_local.go b/storage/sealer/worker_local.go index 326f38366f3..a32e4e2af47 100644 --- a/storage/sealer/worker_local.go +++ b/storage/sealer/worker_local.go @@ -189,6 +189,7 @@ type ReturnType string const ( DataCid ReturnType = "DataCid" AddPiece ReturnType 
= "AddPiece" + AddPieceOfSxx ReturnType = "AddPieceOfSxx" SealPreCommit1 ReturnType = "SealPreCommit1" SealPreCommit2 ReturnType = "SealPreCommit2" SealCommit1 ReturnType = "SealCommit1" @@ -243,6 +244,7 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{ DataCid: rfunc(storiface.WorkerReturn.ReturnDataCid), AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), + AddPieceOfSxx: rfunc(storiface.WorkerReturn.ReturnAddPiece), SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), @@ -358,6 +360,19 @@ func (l *LocalWorker) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSi }) } +// add by lin +func (l *LocalWorker) AddPieceOfSxx(ctx context.Context, sector storiface.SectorRef, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, path string) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, AddPieceOfSxx, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.AddPieceOfSxx(ctx, sector, epcs, sz, path) + }) +} +// end + func (l *LocalWorker) AddPiece(ctx context.Context, sector storiface.SectorRef, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) { sb, err := l.executor() if err != nil { @@ -790,6 +805,15 @@ func (l *LocalWorker) memInfo() (memPhysical, memUsed, memSwap, memSwapUsed uint } func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { + hostname := l.name + + // add by pan + workername := os.Getenv("WORKER_NAME") + if workername != "" { + hostname = workername + } + // end + gpus, err := ffi.GetGPUDevices() if err != nil { log.Errorf("getting gpu devices 
failed: %+v", err) @@ -808,7 +832,7 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { } return storiface.WorkerInfo{ - Hostname: l.name, + Hostname: hostname, IgnoreResources: l.ignoreResources, Resources: storiface.WorkerResources{ MemPhysical: memPhysical, diff --git a/storage/sealer/worker_tracked.go b/storage/sealer/worker_tracked.go index 970ba9a69c5..d7d3d201a32 100644 --- a/storage/sealer/worker_tracked.go +++ b/storage/sealer/worker_tracked.go @@ -193,6 +193,14 @@ func (t *trackedWorker) DataCid(ctx context.Context, pieceSize abi.UnpaddedPiece }) } +// add by lin +func (t *trackedWorker) AddPieceOfSxx(ctx context.Context, sector storiface.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, path string) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTAddPiece, func() (storiface.CallID, error) { + return t.Worker.AddPieceOfSxx(ctx, sector, pieceSizes, newPieceSize, path) + }) +} +// end + func (t *trackedWorker) AddPiece(ctx context.Context, sector storiface.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (storiface.CallID, error) { return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTAddPiece, func() (storiface.CallID, error) { return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)