This repository has been archived by the owner on Jun 27, 2023. It is now read-only.

fix(fsnode): issue #17 #18

Merged
merged 1 commit on Sep 24, 2018
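
This change replaces the deprecated pattern of unmarshalling straight into *pb.Data with ft.FromBytes and reading the generated getters (GetType, GetFilesize, GetBlocksizes, GetHashType, GetFanout), then re-serializing with proto.Marshal, with the FSNode wrapper: ft.FSNodeFromBytes plus the Type, FileSize, Data, BlockSizes, HashType, Fanout and GetBytes accessors. Below is a minimal sketch of the migration, condensed from the hunks that follow; describe is a hypothetical helper (not code from this PR), and the import path and ft alias follow the repository's existing convention.

package example

import (
	ft "github.com/ipfs/go-unixfs" // assumed import path; the repo aliases the package as ft
)

// describe is a hypothetical helper condensed from the call sites changed below.
func describe(data []byte) error {
	// Old style (deprecated by this PR): operate on the raw *pb.Data.
	//   pbd, _ := ft.FromBytes(data)
	//   _ = pbd.GetType(); _ = pbd.GetFilesize(); _ = pbd.GetBlocksizes()

	// New style: go through the FSNode wrapper and its accessors.
	fsn, err := ft.FSNodeFromBytes(data)
	if err != nil {
		return err
	}
	_ = fsn.Type()          // replaces pbd.GetType()
	_ = fsn.FileSize()      // replaces pbd.GetFilesize()
	_ = fsn.BlockSizes()    // replaces pbd.GetBlocksizes() (accessor added in this PR)
	_, err = fsn.GetBytes() // replaces proto.Marshal(pbd)
	return err
}
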
13 changes: 7 additions & 6 deletions hamt/hamt.go
@@ -102,28 +102,29 @@ func NewHamtFromDag(dserv ipld.DAGService, nd ipld.Node) (*Shard, error) {
 		return nil, dag.ErrNotProtobuf
 	}
 
-	pbd, err := format.FromBytes(pbnd.Data())
+	fsn, err := format.FSNodeFromBytes(pbnd.Data())
 	if err != nil {
 		return nil, err
 	}
 
-	if pbd.GetType() != upb.Data_HAMTShard {
+
+	if fsn.Type() != upb.Data_HAMTShard {
 		return nil, fmt.Errorf("node was not a dir shard")
 	}
 
-	if pbd.GetHashType() != HashMurmur3 {
+	if fsn.HashType() != HashMurmur3 {
 		return nil, fmt.Errorf("only murmur3 supported as hash function")
 	}
 
-	ds, err := makeShard(dserv, int(pbd.GetFanout()))
+	ds, err := makeShard(dserv, int(fsn.Fanout()))
 	if err != nil {
 		return nil, err
 	}
 
 	ds.nd = pbnd.Copy().(*dag.ProtoNode)
 	ds.children = make([]child, len(pbnd.Links()))
-	ds.bitfield.SetBytes(pbd.GetData())
-	ds.hashFunc = pbd.GetHashType()
+	ds.bitfield.SetBytes(fsn.Data())
+	ds.hashFunc = fsn.HashType()
 	ds.builder = ds.nd.CidBuilder()
 
 	return ds, nil
12 changes: 6 additions & 6 deletions importer/trickle/trickledag.go
@@ -277,12 +277,12 @@ func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
 		// zero depth dag is raw data block
 		switch nd := n.(type) {
 		case *dag.ProtoNode:
-			pbn, err := ft.FromBytes(nd.Data())
+			fsn, err := ft.FSNodeFromBytes(nd.Data())
 			if err != nil {
 				return err
 			}
 
-			if pbn.GetType() != ft.TRaw {
+			if fsn.Type() != ft.TRaw {
 				return errors.New("expected raw block")
 			}
 
@@ -325,16 +325,16 @@ func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
 	}
 
 	// Verify this is a branch node
-	pbn, err := ft.FromBytes(nd.Data())
+	fsn, err := ft.FSNodeFromBytes(nd.Data())
 	if err != nil {
 		return err
 	}
 
-	if pbn.GetType() != ft.TFile {
-		return fmt.Errorf("expected file as branch node, got: %s", pbn.GetType())
+	if fsn.Type() != ft.TFile {
+		return fmt.Errorf("expected file as branch node, got: %s", fsn.Type())
 	}
 
-	if len(pbn.Data) > 0 {
+	if len(fsn.Data()) > 0 {
 		return errors.New("branch node should not have data")
 	}
 
4 changes: 2 additions & 2 deletions io/resolve.go
@@ -15,7 +15,7 @@ import (
 func ResolveUnixfsOnce(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
 	switch nd := nd.(type) {
 	case *dag.ProtoNode:
-		upb, err := ft.FromBytes(nd.Data())
+		fsn, err := ft.FSNodeFromBytes(nd.Data())
 		if err != nil {
 			// Not a unixfs node, use standard object traversal code
 			lnk, err := nd.GetNodeLink(names[0])
@@ -26,7 +26,7 @@ func ResolveUnixfsOnce(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
 			return lnk, names[1:], nil
 		}
 
-		switch upb.GetType() {
+		switch fsn.Type() {
 		case ft.THAMTShard:
 			rods := dag.NewReadOnlyDagService(ds)
 			s, err := hamt.NewHamtFromDag(rods, nd)
19 changes: 9 additions & 10 deletions mod/dagmodifier.go
@@ -13,7 +13,6 @@ import (
 	trickle "github.com/ipfs/go-unixfs/importer/trickle"
 	uio "github.com/ipfs/go-unixfs/io"
 
-	proto "github.com/gogo/protobuf/proto"
 	cid "github.com/ipfs/go-cid"
 	chunker "github.com/ipfs/go-ipfs-chunker"
 	ipld "github.com/ipfs/go-ipld-format"
@@ -173,11 +172,11 @@ func (dm *DagModifier) Size() (int64, error) {
 func fileSize(n ipld.Node) (uint64, error) {
 	switch nd := n.(type) {
 	case *mdag.ProtoNode:
-		f, err := ft.FromBytes(nd.Data())
+		fsn, err := ft.FSNodeFromBytes(nd.Data())
 		if err != nil {
 			return 0, err
 		}
-		return f.GetFilesize(), nil
+		return fsn.FileSize(), nil
 	case *mdag.RawNode:
 		return uint64(len(nd.RawData())), nil
 	default:
@@ -238,18 +237,18 @@ func (dm *DagModifier) modifyDag(n ipld.Node, offset uint64) (cid.Cid, error) {
 	if len(n.Links()) == 0 {
 		switch nd0 := n.(type) {
 		case *mdag.ProtoNode:
-			f, err := ft.FromBytes(nd0.Data())
+			fsn, err := ft.FSNodeFromBytes(nd0.Data())
 			if err != nil {
 				return cid.Cid{}, err
 			}
 
-			_, err = dm.wrBuf.Read(f.Data[offset:])
+			_, err = dm.wrBuf.Read(fsn.Data()[offset:])
 			if err != nil && err != io.EOF {
 				return cid.Cid{}, err
 			}
 
 			// Update newly written node..
-			b, err := proto.Marshal(f)
+			b, err := fsn.GetBytes()
 			if err != nil {
 				return cid.Cid{}, err
 			}
@@ -300,13 +299,13 @@ func (dm *DagModifier) modifyDag(n ipld.Node, offset uint64) (cid.Cid, error) {
 		return cid.Cid{}, ErrNotUnixfs
 	}
 
-	f, err := ft.FromBytes(node.Data())
+	fsn, err := ft.FSNodeFromBytes(node.Data())
 	if err != nil {
 		return cid.Cid{}, err
 	}
 
 	var cur uint64
-	for i, bs := range f.GetBlocksizes() {
+	for i, bs := range fsn.BlockSizes() {
 		// We found the correct child to write into
 		if cur+bs > offset {
 			child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv)
@@ -510,11 +509,11 @@ func (dm *DagModifier) dagTruncate(ctx context.Context, n ipld.Node, size uint64
 	switch nd := n.(type) {
 	case *mdag.ProtoNode:
 		// TODO: this can likely be done without marshaling and remarshaling
-		pbn, err := ft.FromBytes(nd.Data())
+		fsn, err := ft.FSNodeFromBytes(nd.Data())
 		if err != nil {
 			return nil, err
 		}
-		nd.SetData(ft.WrapData(pbn.Data[:size]))
+		nd.SetData(ft.WrapData(fsn.Data()[:size]))
 		return nd, nil
 	case *mdag.RawNode:
 		return mdag.NewRawNodeWPrefix(nd.RawData()[:size], nd.Cid().Prefix())
4 changes: 2 additions & 2 deletions test/utils.go
@@ -107,15 +107,15 @@ func ArrComp(a, b []byte) error {
 
 // PrintDag pretty-prints the given dag to stdout.
 func PrintDag(nd *mdag.ProtoNode, ds ipld.DAGService, indent int) {
-	pbd, err := ft.FromBytes(nd.Data())
+	fsn, err := ft.FSNodeFromBytes(nd.Data())
 	if err != nil {
 		panic(err)
 	}
 
 	for i := 0; i < indent; i++ {
 		fmt.Print(" ")
 	}
-	fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
+	fmt.Printf("{size = %d, type = %s, children = %d", fsn.FileSize(), fsn.Type().String(), fsn.NumChildren())
 	if len(nd.Links()) > 0 {
 		fmt.Println()
 	}
16 changes: 16 additions & 0 deletions unixfs.go
@@ -29,6 +29,7 @@
 )
 
 // FromBytes unmarshals a byte slice as protobuf Data.
+// Deprecated: Use `FSNodeFromBytes` instead to avoid direct manipulation of `pb.Data`.
 func FromBytes(data []byte) (*pb.Data, error) {
 	pbdata := new(pb.Data)
 	err := proto.Unmarshal(data, pbdata)
@@ -182,6 +183,16 @@ func NewFSNode(dataType pb.Data_DataType) *FSNode {
 	return n
 }
 
+// HashType gets hash type of format
+func (n *FSNode) HashType() uint64 {
+	return n.format.GetHashType()
+}
+
+// Fanout gets fanout of format
+func (n *FSNode) Fanout() uint64 {
+	return n.format.GetFanout()
+}
+
 // AddBlockSize adds the size of the next child block of this node
 func (n *FSNode) AddBlockSize(s uint64) {
 	n.UpdateFilesize(int64(s))
@@ -200,6 +211,11 @@ func (n *FSNode) BlockSize(i int) uint64 {
 	return n.format.Blocksizes[i]
 }
 
+// BlockSizes gets blocksizes of format
+func (n *FSNode) BlockSizes() []uint64 {
+	return n.format.GetBlocksizes()
+}
+
 // RemoveAllBlockSizes removes all the child block sizes of this node.
 func (n *FSNode) RemoveAllBlockSizes() {
 	n.format.Blocksizes = []uint64{}
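
The three accessors added above (HashType, Fanout, BlockSizes) are what allow the hamt and mod call sites to stop reaching into pb.Data directly. The following is an illustrative sketch only: shardParams is a made-up helper (not code from this PR) that reads the same fields NewHamtFromDag in hamt/hamt.go now consumes, via the new FSNode accessors.

package example

import (
	ft "github.com/ipfs/go-unixfs" // assumed import path
)

// shardParams mirrors how NewHamtFromDag reads HAMT shard parameters
// through FSNode accessors rather than pb.Data getters.
func shardParams(raw []byte) (hashType uint64, fanout uint64, blockSizes []uint64, err error) {
	fsn, err := ft.FSNodeFromBytes(raw)
	if err != nil {
		return 0, 0, nil, err
	}
	return fsn.HashType(), fsn.Fanout(), fsn.BlockSizes(), nil
}
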
12 changes: 6 additions & 6 deletions unixfs_test.go
@@ -76,12 +76,12 @@ func TestPBdataTools(t *testing.T) {
 		t.Fatal("Unwrap failed to produce the correct wrapped data.")
 	}
 
-	rawPBdata, err := FromBytes(rawPB)
+	rawPBdata, err := FSNodeFromBytes(rawPB)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	isRaw := rawPBdata.GetType() == TRaw
+	isRaw := rawPBdata.Type() == TRaw
 	if !isRaw {
 		t.Fatal("WrapData does not create pb.Data_Raw!")
 	}
@@ -97,8 +97,8 @@ func TestPBdataTools(t *testing.T) {
 	}
 
 	dirPB := FolderPBData()
-	dir, err := FromBytes(dirPB)
-	isDir := dir.GetType() == TDirectory
+	dir, err := FSNodeFromBytes(dirPB)
+	isDir := dir.Type() == TDirectory
 	if !isDir {
 		t.Fatal("FolderPBData does not create a directory!")
 	}
@@ -115,8 +115,8 @@ func TestPBdataTools(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	catSymPB, err := FromBytes(catSym)
-	isSym := catSymPB.GetType() == TSymlink
+	catSymPB, err := FSNodeFromBytes(catSym)
+	isSym := catSymPB.Type() == TSymlink
 	if !isSym {
 		t.Fatal("Failed to make a Symlink.")
 	}