diff --git a/.gitignore b/.gitignore index a14ae54c8ac..fcbdfe803ec 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ *.out *.test *.orig +*~ diff --git a/.travis.yml b/.travis.yml index 32ce5a78639..f5bf5732a28 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,3 +8,5 @@ go: script: - go test -v ./... + +env: TEST_NO_FUSE=1 diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 985b0bccf28..818b74451f2 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -38,6 +38,10 @@ "Comment": "null-15", "Rev": "12e4b4183793ac4b061921e7980845e750679fd0" }, + { + "ImportPath": "github.com/bren2010/proquint", + "Rev": "5958552242606512f714d2e93513b380f43f9991" + }, { "ImportPath": "github.com/camlistore/lock", "Rev": "ae27720f340952636b826119b58130b9c1a847a0" @@ -60,20 +64,24 @@ }, { "ImportPath": "github.com/jbenet/datastore.go", - "Rev": "e89f0511689bb2d0608496e15491f241842de085" + "Rev": "60ebc56447b5a8264cfed3ae3ff48deb984d7cf1" }, { "ImportPath": "github.com/jbenet/go-base58", "Rev": "568a28d73fd97651d3442392036a658b6976eed5" }, + { + "ImportPath": "github.com/jbenet/go-is-domain", + "Rev": "93b717f2ae17838a265e30277275ee99ee7198d6" + }, { "ImportPath": "github.com/jbenet/go-msgio", "Rev": "c9069ab79c95aa0686347b516972c7329c4391f2" }, { "ImportPath": "github.com/jbenet/go-multiaddr", - "Comment": "0.1.2-3-g74443fc", - "Rev": "74443fca319c4c2f5e9968b8e268c30a4a74dc64" + "Comment": "0.1.2-9-g1ec9436", + "Rev": "1ec9436b1d642f4f04c0d9e21a0719cda3d659ee" }, { "ImportPath": "github.com/jbenet/go-multihash", diff --git a/Godeps/_workspace/src/github.com/bren2010/proquint/README.md b/Godeps/_workspace/src/github.com/bren2010/proquint/README.md new file mode 100644 index 00000000000..13e7b0b5eba --- /dev/null +++ b/Godeps/_workspace/src/github.com/bren2010/proquint/README.md @@ -0,0 +1,6 @@ +Proquint +------- + +Golang implementation of [Proquint Pronounceable Identifiers](https://github.com/deoxxa/proquint). 
+ + diff --git a/Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go b/Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go new file mode 100644 index 00000000000..60e1cf9816f --- /dev/null +++ b/Godeps/_workspace/src/github.com/bren2010/proquint/proquint.go @@ -0,0 +1,123 @@ +/* +Copyright (c) 2014 Brendan McMillion + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +package proquint + +import ( + "bytes" + "strings" + "regexp" +) + +var ( + conse = [...]byte{'b', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', + 'p', 'r', 's', 't', 'v', 'z'} + vowse = [...]byte{'a', 'i', 'o', 'u'} + + consd = map[byte] uint16 { + 'b' : 0, 'd' : 1, 'f' : 2, 'g' : 3, + 'h' : 4, 'j' : 5, 'k' : 6, 'l' : 7, + 'm' : 8, 'n' : 9, 'p' : 10, 'r' : 11, + 's' : 12, 't' : 13, 'v' : 14, 'z' : 15, + } + + vowsd = map[byte] uint16 { + 'a' : 0, 'i' : 1, 'o' : 2, 'u' : 3, + } +) + +/** +* Tests if a given string is a Proquint identifier +* +* @param {string} str The candidate string. 
+* +* @return {bool} Whether or not it qualifies. +* @return {error} Error +*/ +func IsProquint(str string) (bool, error) { + exp := "^([abdfghijklmnoprstuvz]{5}-)*[abdfghijklmnoprstuvz]{5}$" + ok, err := regexp.MatchString(exp, str) + + return ok, err +} + +/** +* Encodes an arbitrary byte slice into an identifier. +* +* @param {[]byte} buf Slice of bytes to encode. +* +* @return {string} The given byte slice as an identifier. +*/ +func Encode(buf []byte) string { + var out bytes.Buffer + + for i := 0; i < len(buf); i = i + 2 { + var n uint16 = (uint16(buf[i]) * 256) + uint16(buf[i + 1]) + + var ( + c1 = n & 0x0f + v1 = (n >> 4) & 0x03 + c2 = (n >> 6) & 0x0f + v2 = (n >> 10) & 0x03 + c3 = (n >> 12) & 0x0f + ) + + out.WriteByte(conse[c1]) + out.WriteByte(vowse[v1]) + out.WriteByte(conse[c2]) + out.WriteByte(vowse[v2]) + out.WriteByte(conse[c3]) + + if (i + 2) < len(buf) { + out.WriteByte('-') + } + } + + return out.String() +} + +/** +* Decodes an identifier into its corresponding byte slice. +* +* @param {string} str Identifier to convert. +* +* @return {[]byte} The identifier as a byte slice. +*/ +func Decode(str string) []byte { + var ( + out bytes.Buffer + bits []string = strings.Split(str, "-") + ) + + for i := 0; i < len(bits); i++ { + var x uint16 = consd[bits[i][0]] + + (vowsd[bits[i][1]] << 4) + + (consd[bits[i][2]] << 6) + + (vowsd[bits[i][3]] << 10) + + (consd[bits[i][4]] << 12) + + out.WriteByte(byte(x >> 8)) + out.WriteByte(byte(x)) + } + + return out.Bytes() +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Godeps.json b/Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Godeps.json new file mode 100644 index 00000000000..bebf4a58f25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Godeps.json @@ -0,0 +1,43 @@ +{ + "ImportPath": "github.com/jbenet/datastore.go", + "GoVersion": "go1.3.1", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "code.google.com/p/go-uuid/uuid", + "Comment": "null-12", + "Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9" + }, + { + "ImportPath": "code.google.com/p/snappy-go/snappy", + "Comment": "null-15", + "Rev": "12e4b4183793ac4b061921e7980845e750679fd0" + }, + { + "ImportPath": "github.com/codahale/blake2", + "Rev": "3fa823583afba430e8fc7cdbcc670dbf90bfacc4" + }, + { + "ImportPath": "github.com/hashicorp/golang-lru", + "Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370" + }, + { + "ImportPath": "github.com/mattbaird/elastigo/api", + "Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d" + }, + { + "ImportPath": "github.com/mattbaird/elastigo/core", + "Rev": "041b88c1fcf6489a5721ede24378ce1253b9159d" + }, + { + "ImportPath": "github.com/syndtr/goleveldb/leveldb", + "Rev": "9bca75c48d6c31becfbb127702b425e7226052e3" + }, + { + "ImportPath": "gopkg.in/check.v1", + "Rev": "91ae5f88a67b14891cfd43895b01164f6c120420" + } + ] +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Readme b/Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Readme new file mode 100644 index 00000000000..4cdaa53d56d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go index 33ea5f3a7bd..782b5de4608 100644 --- a/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds.go @@ -1,28 +1,30 @@ package datastore -import ( - "log" -) +import "log" // Here are some basic datastore implementations. -// MapDatastore uses a standard Go map for internal storage. 
type keyMap map[Key]interface{} + +// MapDatastore uses a standard Go map for internal storage. type MapDatastore struct { values keyMap } +// NewMapDatastore constructs a MapDatastore func NewMapDatastore() (d *MapDatastore) { return &MapDatastore{ values: keyMap{}, } } +// Put implements Datastore.Put func (d *MapDatastore) Put(key Key, value interface{}) (err error) { d.values[key] = value return nil } +// Get implements Datastore.Get func (d *MapDatastore) Get(key Key) (value interface{}, err error) { val, found := d.values[key] if !found { @@ -31,19 +33,22 @@ func (d *MapDatastore) Get(key Key) (value interface{}, err error) { return val, nil } +// Has implements Datastore.Has func (d *MapDatastore) Has(key Key) (exists bool, err error) { _, found := d.values[key] return found, nil } +// Delete implements Datastore.Delete func (d *MapDatastore) Delete(key Key) (err error) { delete(d.values, key) return nil } +// KeyList implements Datastore.KeyList func (d *MapDatastore) KeyList() ([]Key, error) { var keys []Key - for k, _ := range d.values { + for k := range d.values { keys = append(keys, k) } return keys, nil @@ -54,26 +59,32 @@ func (d *MapDatastore) KeyList() ([]Key, error) { type NullDatastore struct { } +// NewNullDatastore constructs a null datastoe func NewNullDatastore() *NullDatastore { return &NullDatastore{} } +// Put implements Datastore.Put func (d *NullDatastore) Put(key Key, value interface{}) (err error) { return nil } +// Get implements Datastore.Get func (d *NullDatastore) Get(key Key) (value interface{}, err error) { return nil, nil } +// Has implements Datastore.Has func (d *NullDatastore) Has(key Key) (exists bool, err error) { return false, nil } +// Delete implements Datastore.Delete func (d *NullDatastore) Delete(key Key) (err error) { return nil } +// KeyList implements Datastore.KeyList func (d *NullDatastore) KeyList() ([]Key, error) { return nil, nil } @@ -81,38 +92,56 @@ func (d *NullDatastore) KeyList() ([]Key, error) { // 
LogDatastore logs all accesses through the datastore. type LogDatastore struct { Name string - Child Datastore + child Datastore +} + +// Shim is a datastore which has a child. +type Shim interface { + Datastore + + Children() []Datastore } -func NewLogDatastore(ds Datastore, name string) *LogDatastore { +// NewLogDatastore constructs a log datastore. +func NewLogDatastore(ds Datastore, name string) Shim { if len(name) < 1 { name = "LogDatastore" } - return &LogDatastore{Name: name, Child: ds} + return &LogDatastore{Name: name, child: ds} +} + +// Children implements Shim +func (d *LogDatastore) Children() []Datastore { + return []Datastore{d.child} } +// Put implements Datastore.Put func (d *LogDatastore) Put(key Key, value interface{}) (err error) { - log.Printf("%s: Put %s", d.Name, key) + log.Printf("%s: Put %s\n", d.Name, key) // log.Printf("%s: Put %s ```%s```", d.Name, key, value) - return d.Child.Put(key, value) + return d.child.Put(key, value) } +// Get implements Datastore.Get func (d *LogDatastore) Get(key Key) (value interface{}, err error) { - log.Printf("%s: Get %s", d.Name, key) - return d.Child.Get(key) + log.Printf("%s: Get %s\n", d.Name, key) + return d.child.Get(key) } +// Has implements Datastore.Has func (d *LogDatastore) Has(key Key) (exists bool, err error) { - log.Printf("%s: Has %s", d.Name, key) - return d.Child.Has(key) + log.Printf("%s: Has %s\n", d.Name, key) + return d.child.Has(key) } +// Delete implements Datastore.Delete func (d *LogDatastore) Delete(key Key) (err error) { - log.Printf("%s: Delete %s", d.Name, key) - return d.Child.Delete(key) + log.Printf("%s: Delete %s\n", d.Name, key) + return d.child.Delete(key) } +// KeyList implements Datastore.KeyList func (d *LogDatastore) KeyList() ([]Key, error) { - log.Printf("%s: Get KeyList.", d.Name) - return d.Child.KeyList() + log.Printf("%s: Get KeyList\n", d.Name) + return d.child.KeyList() } diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds_test.go 
b/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds_test.go new file mode 100644 index 00000000000..e175d94dab1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/basic_ds_test.go @@ -0,0 +1,13 @@ +package datastore_test + +import ( + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + . "launchpad.net/gocheck" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +type BasicSuite struct{} + +var _ = Suite(&BasicSuite{}) diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go index 9ff21a6a078..f3260cd7da4 100644 --- a/Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/datastore.go @@ -5,7 +5,7 @@ import ( ) /* -A Datastore represents storage for any key-value pair. +Datastore represents storage for any key-value pair. Datastores are general enough to be backed by all kinds of different storage: in-memory caches, databases, a remote datastore, flat files on disk, etc. @@ -27,7 +27,6 @@ and thus it should behave predictably and handle exceptional conditions with proper error reporting. Thus, all Datastore calls may return errors, which should be checked by callers. */ - type Datastore interface { // Put stores the object `value` named by `key`. // @@ -53,20 +52,27 @@ type Datastore interface { // Delete removes the value for given `key`. Delete(key Key) (err error) - // Returns a list of keys in the datastore + // KeyList returns a list of keys in the datastore KeyList() ([]Key, error) } +// ThreadSafeDatastore is an interface that all threadsafe datastore should +// implement to leverage type safety checks. +type ThreadSafeDatastore interface { + Datastore + IsThreadSafe() +} + // Errors // ErrNotFound is returned by Get, Has, and Delete when a datastore does not // map the given key to a value. 
-var ErrNotFound = errors.New("datastore: key not found.") +var ErrNotFound = errors.New("datastore: key not found") // ErrInvalidType is returned by Put when a given value is incopatible with // the type the datastore supports. This means a conversion (or serialization) // is needed beforehand. -var ErrInvalidType = errors.New("datastore: invalid type error.") +var ErrInvalidType = errors.New("datastore: invalid type error") // GetBackedHas provides a default Datastore.Has implementation. // It exists so Datastore.Has implementations can use it, like so: diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs.go new file mode 100644 index 00000000000..1c1ac791d52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs.go @@ -0,0 +1,122 @@ +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" +) + +// Datastore uses a standard Go map for internal storage. +type Datastore struct { + path string +} + +// NewDatastore returns a new fs Datastore at given `path` +func NewDatastore(path string) (ds.Datastore, error) { + if !isDir(path) { + return nil, fmt.Errorf("Failed to find directory at: %v (file? perms?)", path) + } + + return &Datastore{path: path}, nil +} + +// KeyFilename returns the filename associated with `key` +func (d *Datastore) KeyFilename(key ds.Key) string { + return filepath.Join(d.path, key.String(), ".dsobject") +} + +// Put stores the given value. +func (d *Datastore) Put(key ds.Key, value interface{}) (err error) { + + // TODO: maybe use io.Readers/Writers? + // r, err := dsio.CastAsReader(value) + // if err != nil { + // return err + // } + + val, ok := value.([]byte) + if !ok { + return ds.ErrInvalidType + } + + fn := d.KeyFilename(key) + + // mkdirall above. 
+ err = os.MkdirAll(filepath.Dir(fn), 0755) + if err != nil { + return err + } + + return ioutil.WriteFile(fn, val, 0666) +} + +// Get returns the value for given key +func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { + fn := d.KeyFilename(key) + if !isFile(fn) { + return nil, ds.ErrNotFound + } + + return ioutil.ReadFile(fn) +} + +// Has returns whether the datastore has a value for a given key +func (d *Datastore) Has(key ds.Key) (exists bool, err error) { + return ds.GetBackedHas(d, key) +} + +// Delete removes the value for given key +func (d *Datastore) Delete(key ds.Key) (err error) { + fn := d.KeyFilename(key) + if !isFile(fn) { + return ds.ErrNotFound + } + + return os.Remove(fn) +} + +// KeyList returns a list of all keys in the datastore +func (d *Datastore) KeyList() ([]ds.Key, error) { + + keys := []ds.Key{} + + walkFn := func(path string, info os.FileInfo, err error) error { + // remove ds path prefix + if strings.HasPrefix(path, d.path) { + path = path[len(d.path):] + } + + if !info.IsDir() { + key := ds.NewKey(path) + keys = append(keys, key) + } + return nil + } + + filepath.Walk(d.path, walkFn) + return keys, nil +} + +// isDir returns whether given path is a directory +func isDir(path string) bool { + finfo, err := os.Stat(path) + if err != nil { + return false + } + + return finfo.IsDir() +} + +// isFile returns whether given path is a file +func isFile(path string) bool { + finfo, err := os.Stat(path) + if err != nil { + return false + } + + return !finfo.IsDir() +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs_test.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs_test.go new file mode 100644 index 00000000000..cd69647d0e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs/fs_test.go @@ -0,0 +1,65 @@ +package fs_test + +import ( + "bytes" + "testing" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + fs 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs" + . "launchpad.net/gocheck" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +type DSSuite struct { + dir string + ds ds.Datastore +} + +var _ = Suite(&DSSuite{}) + +func (ks *DSSuite) SetUpTest(c *C) { + ks.dir = c.MkDir() + ks.ds, _ = fs.NewDatastore(ks.dir) +} + +func (ks *DSSuite) TestOpen(c *C) { + _, err := fs.NewDatastore("/tmp/foo/bar/baz") + c.Assert(err, Not(Equals), nil) + + // setup ds + _, err = fs.NewDatastore(ks.dir) + c.Assert(err, Equals, nil) +} + +func (ks *DSSuite) TestBasic(c *C) { + + keys := strsToKeys([]string{ + "foo", + "foo/bar", + "foo/bar/baz", + "foo/barb", + "foo/bar/bazb", + "foo/bar/baz/barb", + }) + + for _, k := range keys { + err := ks.ds.Put(k, []byte(k.String())) + c.Check(err, Equals, nil) + } + + for _, k := range keys { + v, err := ks.ds.Get(k) + c.Check(err, Equals, nil) + c.Check(bytes.Equal(v.([]byte), []byte(k.String())), Equals, true) + } +} + +func strsToKeys(strs []string) []ds.Key { + keys := make([]ds.Key, len(strs)) + for i, s := range strs { + keys[i] = ds.NewKey(s) + } + return keys +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/io/io.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/io/io.go new file mode 100644 index 00000000000..338b2e053d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/io/io.go @@ -0,0 +1,44 @@ +package leveldb + +import ( + "bytes" + "io" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" +) + +// CastAsReader does type assertions to find the type of a value and attempts +// to turn it into an io.Reader. 
If not possible, will return ds.ErrInvalidType +func CastAsReader(value interface{}) (io.Reader, error) { + switch v := value.(type) { + case io.Reader: + return v, nil + + case []byte: + return bytes.NewReader(v), nil + + case string: + return bytes.NewReader([]byte(v)), nil + + default: + return nil, ds.ErrInvalidType + } +} + +// // CastAsWriter does type assertions to find the type of a value and attempts +// // to turn it into an io.Writer. If not possible, will return ds.ErrInvalidType +// func CastAsWriter(value interface{}) (err error) { +// switch v := value.(type) { +// case io.Reader: +// return v, nil +// +// case []byte: +// return bytes.NewReader(v), nil +// +// case string: +// return bytes.NewReader([]byte(v)), nil +// +// default: +// return nil, ds.ErrInvalidType +// } +// } diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go index df210523f57..0fc2ec0542f 100644 --- a/Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/key_test.go @@ -2,12 +2,13 @@ package datastore_test import ( "bytes" - . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" - . "launchpad.net/gocheck" "math/rand" "path" "strings" "testing" + + . "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + . "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform.go new file mode 100644 index 00000000000..3be41db6533 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform.go @@ -0,0 +1,88 @@ +package keytransform + +import ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + +// KeyTransform is a function that transforms one key into another. +type KeyTransform func(ds.Key) ds.Key + +// Datastore is a keytransform.Datastore +type Datastore interface { + ds.Shim + + // Transform runs the transformation function + Transform(ds.Key) ds.Key + + // TransformFunc returns the KeyTransform function + TransformFunc() KeyTransform +} + +// ktds keeps a KeyTransform function +type ktds struct { + child ds.Datastore + xform KeyTransform +} + +// WrapDatastore wraps a given datastore with a KeyTransform function. +// The resulting wrapped datastore will use the transform on all Datastore +// operations. +func WrapDatastore(child ds.Datastore, f KeyTransform) Datastore { + if f == nil { + panic("f (KeyTransform) is nil") + } + + if child == nil { + panic("child (ds.Datastore) is nil") + } + + return &ktds{child, f} +} + +// TransformFunc returns the KeyTransform function +func (d *ktds) TransformFunc() KeyTransform { + return d.xform +} + +// Transform runs the KeyTransform function +func (d *ktds) Transform(k ds.Key) ds.Key { + return d.xform(k) +} + +// Children implements ds.Shim +func (d *ktds) Children() []ds.Datastore { + return []ds.Datastore{d.child} +} + +// Put stores the given value, transforming the key first. +func (d *ktds) Put(key ds.Key, value interface{}) (err error) { + return d.child.Put(d.Transform(key), value) +} + +// Get returns the value for given key, transforming the key first. 
+func (d *ktds) Get(key ds.Key) (value interface{}, err error) { + return d.child.Get(d.Transform(key)) +} + +// Has returns whether the datastore has a value for a given key, transforming +// the key first. +func (d *ktds) Has(key ds.Key) (exists bool, err error) { + return d.child.Has(d.Transform(key)) +} + +// Delete removes the value for given key +func (d *ktds) Delete(key ds.Key) (err error) { + return d.child.Delete(d.Transform(key)) +} + +// KeyList returns a list of all keys in the datastore, transforming keys out. +func (d *ktds) KeyList() ([]ds.Key, error) { + + keys, err := d.child.KeyList() + if err != nil { + return nil, err + } + + for i, k := range keys { + keys[i] = d.Transform(k) + } + return keys, nil +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform_test.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform_test.go new file mode 100644 index 00000000000..ad752d27424 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform/keytransform_test.go @@ -0,0 +1,60 @@ +package keytransform_test + +import ( + "bytes" + "testing" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + kt "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform" + . "launchpad.net/gocheck" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { TestingT(t) } + +type DSSuite struct { + dir string + ds ds.Datastore +} + +var _ = Suite(&DSSuite{}) + +func (ks *DSSuite) TestBasic(c *C) { + + mpds := ds.NewMapDatastore() + ktds := kt.WrapDatastore(mpds, func(k ds.Key) ds.Key { + return k.Reverse() + }) + + keys := strsToKeys([]string{ + "foo", + "foo/bar", + "foo/bar/baz", + "foo/barb", + "foo/bar/bazb", + "foo/bar/baz/barb", + }) + + for _, k := range keys { + err := ktds.Put(k, []byte(k.String())) + c.Check(err, Equals, nil) + } + + for _, k := range keys { + v1, err := ktds.Get(k) + c.Check(err, Equals, nil) + c.Check(bytes.Equal(v1.([]byte), []byte(k.String())), Equals, true) + + v2, err := mpds.Get(k.Reverse()) + c.Check(err, Equals, nil) + c.Check(bytes.Equal(v2.([]byte), []byte(k.String())), Equals, true) + } +} + +func strsToKeys(strs []string) []ds.Key { + keys := make([]ds.Key, len(strs)) + for i, s := range strs { + keys[i] = ds.NewKey(s) + } + return keys +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go index 1b00d8bfd01..6fc2594a178 100644 --- a/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb/datastore.go @@ -13,7 +13,7 @@ type Datastore struct { type Options opt.Options -func NewDatastore(path string, opts *Options) (*Datastore, error) { +func NewDatastore(path string, opts *Options) (ds.ThreadSafeDatastore, error) { var nopts opt.Options if opts != nil { nopts = opt.Options(*opts) @@ -76,3 +76,5 @@ func (d *Datastore) KeyList() ([]ds.Key, error) { func (d *Datastore) Close() (err error) { return d.DB.Close() } + +func (d *Datastore) IsThreadSafe() {} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore.go new file mode 100644 index 00000000000..2dd74faa9ff --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore.go @@ -0,0 +1,54 @@ +package lru + +import ( + "errors" + + lru "github.com/hashicorp/golang-lru" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" +) + +// Datastore uses golang-lru for internal storage. +type Datastore struct { + cache *lru.Cache +} + +// NewDatastore constructs a new LRU Datastore with given capacity. +func NewDatastore(capacity int) (*Datastore, error) { + cache, err := lru.New(capacity) + if err != nil { + return nil, err + } + + return &Datastore{cache: cache}, nil +} + +// Put stores the object `value` named by `key`. +func (d *Datastore) Put(key ds.Key, value interface{}) (err error) { + d.cache.Add(key, value) + return nil +} + +// Get retrieves the object `value` named by `key`. +func (d *Datastore) Get(key ds.Key) (value interface{}, err error) { + val, ok := d.cache.Get(key) + if !ok { + return nil, ds.ErrNotFound + } + return val, nil +} + +// Has returns whether the `key` is mapped to a `value`. +func (d *Datastore) Has(key ds.Key) (exists bool, err error) { + return ds.GetBackedHas(d, key) +} + +// Delete removes the value for given `key`. 
+func (d *Datastore) Delete(key ds.Key) (err error) { + d.cache.Remove(key) + return nil +} + +// KeyList returns a list of keys in the datastore +func (d *Datastore) KeyList() ([]ds.Key, error) { + return nil, errors.New("KeyList not implemented.") +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore_test.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore_test.go new file mode 100644 index 00000000000..b030df9a240 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru/datastore_test.go @@ -0,0 +1,52 @@ +package lru_test + +import ( + "strconv" + "testing" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + lru "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/lru" + . "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { TestingT(t) } + +type DSSuite struct{} + +var _ = Suite(&DSSuite{}) + +func (ks *DSSuite) TestBasic(c *C) { + var size = 1000 + + d, err := lru.NewDatastore(size) + c.Check(err, Equals, nil) + + for i := 0; i < size; i++ { + err := d.Put(ds.NewKey(strconv.Itoa(i)), i) + c.Check(err, Equals, nil) + } + + for i := 0; i < size; i++ { + j, err := d.Get(ds.NewKey(strconv.Itoa(i))) + c.Check(j, Equals, i) + c.Check(err, Equals, nil) + } + + for i := 0; i < size; i++ { + err := d.Put(ds.NewKey(strconv.Itoa(i+size)), i) + c.Check(err, Equals, nil) + } + + for i := 0; i < size; i++ { + j, err := d.Get(ds.NewKey(strconv.Itoa(i))) + c.Check(j, Equals, nil) + c.Check(err, Equals, ds.ErrNotFound) + } + + for i := 0; i < size; i++ { + j, err := d.Get(ds.NewKey(strconv.Itoa(i + size))) + c.Check(j, Equals, i) + c.Check(err, Equals, nil) + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/query.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/query.go new file mode 100644 index 00000000000..2f89def7c96 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jbenet/datastore.go/query.go @@ -0,0 +1,19 @@ +package datastore + +// type KeyIterator struct { +// HasNext() bool +// Next() interface{} +// } + +// type Query struct { +// } + +/* +QueryDatastores support a Query interface. Queries are used to support +searching for values (beyond simple key-based `Get`s). +*/ +// type QueryDatastore interface { +// // Query returns an Iterator of Keys whose Values match criteria +// // expressed in `query`. +// Query(Query) (iter Iterator, err error) +// } diff --git a/Godeps/_workspace/src/github.com/jbenet/datastore.go/sync/sync.go b/Godeps/_workspace/src/github.com/jbenet/datastore.go/sync/sync.go new file mode 100644 index 00000000000..06a363410b3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/datastore.go/sync/sync.go @@ -0,0 +1,64 @@ +package sync + +import ( + "sync" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" +) + +// MutexDatastore contains a child datastire and a mutex. 
+// used for coarse sync +type MutexDatastore struct { + sync.RWMutex + + child ds.Datastore +} + +// MutexWrap constructs a datastore with a coarse lock around +// the entire datastore, for every single operation +func MutexWrap(d ds.Datastore) ds.ThreadSafeDatastore { + return &MutexDatastore{child: d} +} + +// Children implements Shim +func (d *MutexDatastore) Children() []ds.Datastore { + return []ds.Datastore{d.child} +} + +// IsThreadSafe implements ThreadSafeDatastore +func (d *MutexDatastore) IsThreadSafe() {} + +// Put implements Datastore.Put +func (d *MutexDatastore) Put(key ds.Key, value interface{}) (err error) { + d.Lock() + defer d.Unlock() + return d.child.Put(key, value) +} + +// Get implements Datastore.Get +func (d *MutexDatastore) Get(key ds.Key) (value interface{}, err error) { + d.RLock() + defer d.RUnlock() + return d.child.Get(key) +} + +// Has implements Datastore.Has +func (d *MutexDatastore) Has(key ds.Key) (exists bool, err error) { + d.RLock() + defer d.RUnlock() + return d.child.Has(key) +} + +// Delete implements Datastore.Delete +func (d *MutexDatastore) Delete(key ds.Key) (err error) { + d.Lock() + defer d.Unlock() + return d.child.Delete(key) +} + +// KeyList implements Datastore.KeyList +func (d *MutexDatastore) KeyList() ([]ds.Key, error) { + d.RLock() + defer d.RUnlock() + return d.child.KeyList() +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/LICENSE b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/LICENSE new file mode 100644 index 00000000000..c7386b3c940 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/README.md b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/README.md new file mode 100644 index 00000000000..21af7b39545 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/README.md @@ -0,0 +1,23 @@ +# go-is-domain + +This package is dedicated to [@whyrusleeping](https://github.com/whyrusleeping). + +Docs: https://godoc.org/github.com/jbenet/go-is-domain + + +Check whether something is a domain. + + +```Go + +import ( + isd "github.com/jbenet/go-is-domain" +) + +isd.IsDomain("foo.com") // true +isd.IsDomain("foo.bar.com.") // true +isd.IsDomain("foo.bar.baz") // false + +``` + +MIT Licensed diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/doc.go b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/doc.go new file mode 100644 index 00000000000..db6855c4ade --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/doc.go @@ -0,0 +1,13 @@ +/* +Package isdomain package allows users to check whether strings represent domain names. 
+ + import ( + isd "github.com/jbenet/go-is-domain" + ) + + isd.IsDomain("foo.com") // true + isd.IsDomain("foo.bar.com.") // true + isd.IsDomain("foo.bar.baz") // false + +*/ +package isdomain diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/domainre.go b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/domainre.go new file mode 100644 index 00000000000..3b915f8e827 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/domainre.go @@ -0,0 +1,12 @@ +package isdomain + +import "regexp" + +// DomainRegexpStr is a regular expression string to validate domains. +const DomainRegexpStr = "^([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,}$" + +var domainRegexp *regexp.Regexp + +func init() { + domainRegexp = regexp.MustCompile(DomainRegexpStr) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain.go b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain.go new file mode 100644 index 00000000000..7591c7dc48d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain.go @@ -0,0 +1,44 @@ +package isdomain + +import "strings" + +// IsICANNTLD returns whether the given string is a TLD (Top Level Domain), +// according to ICANN. Well, really according to the TLDs listed in this +// package. +func IsICANNTLD(s string) bool { + s = strings.ToUpper(s) + _, found := TLDs[s] + return found +} + +// IsExtendedTLD returns whether the given string is a TLD (Top Level Domain), +// extended with a few other "TLDs": .bit, .onion +func IsExtendedTLD(s string) bool { + s = strings.ToUpper(s) + _, found := ExtendedTLDs[s] + return found +} + +// IsTLD returns whether the given string is a TLD (according to ICANN, or +// in the set of ExtendedTLDs listed in this package. +func IsTLD(s string) bool { + return IsICANNTLD(s) || IsExtendedTLD(s) +} + +// IsDomain returns whether given string is a domain. +// It first checks the TLD, and then uses a regular expression. 
+func IsDomain(s string) bool { + if strings.HasSuffix(s, ".") { + s = s[:len(s)-1] + } + + split := strings.Split(s, ".") + tld := split[len(split)-1] + + if !IsTLD(tld) { + return false + } + + s = strings.ToLower(s) + return domainRegexp.MatchString(s) +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain_test.go b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain_test.go new file mode 100644 index 00000000000..6bd224ef424 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/is_domain_test.go @@ -0,0 +1,29 @@ +package isdomain + +import "testing" + +func TestBasic(t *testing.T) { + cases := map[string]bool{ + "foo.bar.baz.com": true, + "foo.bar.baz": false, + "foo.bar.baz.com.": true, + "com": false, // yeah yeah... + ".": false, // yeah yeah... + "..": false, + ".foo.com.": false, + ".foo.com": false, + "fo o.com": false, + "example.com": true, + "fjdoisajfdiosafdsa8fd8saf8dsa8fdsafdsa-fd-sa-fd-saf-dsa.org": true, + "fjdoisajfdiosafdsa8fd8saf8dsa8fdsafdsa-fd-sa-fd-saf-dsa.bit": true, + "fjdoisajfdiosafdsa8fd8saf8dsa8fdsafdsa-fd-sa-fd-saf-dsa.onion": true, + "a.b.c.d.e.f.g.h.i.j.k.l.museum": true, + "a.b.c.d.e.f.g.h.i.j.k.l": false, + } + + for d, ok := range cases { + if IsDomain(d) != ok { + t.Errorf("Misclassification: %v should be %v", d, ok) + } + } +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds-alpha-by-domain.txt b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds-alpha-by-domain.txt new file mode 100644 index 00000000000..521f80e7aba --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds-alpha-by-domain.txt @@ -0,0 +1,727 @@ +# from http://data.iana.org/TLD/tlds-alpha-by-domain.txt +# Version 2014100300, Last Updated Fri Oct 3 07:07:01 2014 UTC +AC +ACADEMY +ACCOUNTANTS +ACTIVE +ACTOR +AD +AE +AERO +AF +AG +AGENCY +AI +AIRFORCE +AL +ALLFINANZ +AM +AN +AO +AQ +AR +ARCHI +ARMY +ARPA +AS +ASIA +ASSOCIATES +AT +ATTORNEY +AU +AUCTION +AUDIO 
+AUTOS +AW +AX +AXA +AZ +BA +BAR +BARGAINS +BAYERN +BB +BD +BE +BEER +BERLIN +BEST +BF +BG +BH +BI +BID +BIKE +BIO +BIZ +BJ +BLACK +BLACKFRIDAY +BLUE +BM +BMW +BN +BNPPARIBAS +BO +BOO +BOUTIQUE +BR +BRUSSELS +BS +BT +BUDAPEST +BUILD +BUILDERS +BUSINESS +BUZZ +BV +BW +BY +BZ +BZH +CA +CAB +CAL +CAMERA +CAMP +CANCERRESEARCH +CAPETOWN +CAPITAL +CARAVAN +CARDS +CARE +CAREER +CAREERS +CASA +CASH +CAT +CATERING +CC +CD +CENTER +CEO +CERN +CF +CG +CH +CHANNEL +CHEAP +CHRISTMAS +CHROME +CHURCH +CI +CITIC +CITY +CK +CL +CLAIMS +CLEANING +CLICK +CLINIC +CLOTHING +CLUB +CM +CN +CO +CODES +COFFEE +COLLEGE +COLOGNE +COM +COMMUNITY +COMPANY +COMPUTER +CONDOS +CONSTRUCTION +CONSULTING +CONTRACTORS +COOKING +COOL +COOP +COUNTRY +CR +CREDIT +CREDITCARD +CRUISES +CU +CUISINELLA +CV +CW +CX +CY +CYMRU +CZ +DAD +DANCE +DATING +DAY +DE +DEALS +DEGREE +DEMOCRAT +DENTAL +DENTIST +DESI +DIAMONDS +DIET +DIGITAL +DIRECT +DIRECTORY +DISCOUNT +DJ +DK +DM +DNP +DO +DOMAINS +DURBAN +DVAG +DZ +EAT +EC +EDU +EDUCATION +EE +EG +EMAIL +ENGINEER +ENGINEERING +ENTERPRISES +EQUIPMENT +ER +ES +ESQ +ESTATE +ET +EU +EUS +EVENTS +EXCHANGE +EXPERT +EXPOSED +FAIL +FARM +FEEDBACK +FI +FINANCE +FINANCIAL +FISH +FISHING +FITNESS +FJ +FK +FLIGHTS +FLORIST +FLY +FM +FO +FOO +FORSALE +FOUNDATION +FR +FRL +FROGANS +FUND +FURNITURE +FUTBOL +GA +GAL +GALLERY +GB +GBIZ +GD +GE +GENT +GF +GG +GH +GI +GIFT +GIFTS +GIVES +GL +GLASS +GLE +GLOBAL +GLOBO +GM +GMAIL +GMO +GMX +GN +GOOGLE +GOP +GOV +GP +GQ +GR +GRAPHICS +GRATIS +GREEN +GRIPE +GS +GT +GU +GUIDE +GUITARS +GURU +GW +GY +HAMBURG +HAUS +HEALTHCARE +HELP +HERE +HIPHOP +HIV +HK +HM +HN +HOLDINGS +HOLIDAY +HOMES +HORSE +HOST +HOSTING +HOUSE +HOW +HR +HT +HU +IBM +ID +IE +IL +IM +IMMO +IMMOBILIEN +IN +INDUSTRIES +INFO +ING +INK +INSTITUTE +INSURE +INT +INTERNATIONAL +INVESTMENTS +IO +IQ +IR +IS +IT +JE +JETZT +JM +JO +JOBS +JOBURG +JP +JUEGOS +KAUFEN +KE +KG +KH +KI +KIM +KITCHEN +KIWI +KM +KN +KOELN +KP +KR +KRD +KRED +KW +KY +KZ +LA +LACAIXA +LAND +LAWYER +LB +LC 
+LEASE +LGBT +LI +LIFE +LIGHTING +LIMITED +LIMO +LINK +LK +LOANS +LONDON +LOTTO +LR +LS +LT +LTDA +LU +LUXE +LUXURY +LV +LY +MA +MAISON +MANAGEMENT +MANGO +MARKET +MARKETING +MC +MD +ME +MEDIA +MEET +MELBOURNE +MEME +MENU +MG +MH +MIAMI +MIL +MINI +MK +ML +MM +MN +MO +MOBI +MODA +MOE +MONASH +MORTGAGE +MOSCOW +MOTORCYCLES +MOV +MP +MQ +MR +MS +MT +MU +MUSEUM +MV +MW +MX +MY +MZ +NA +NAGOYA +NAME +NAVY +NC +NE +NET +NETWORK +NEUSTAR +NEW +NEXUS +NF +NG +NGO +NHK +NI +NINJA +NL +NO +NP +NR +NRA +NRW +NU +NYC +NZ +OKINAWA +OM +ONG +ONL +OOO +ORG +ORGANIC +OTSUKA +OVH +PA +PARIS +PARTNERS +PARTS +PE +PF +PG +PH +PHARMACY +PHOTO +PHOTOGRAPHY +PHOTOS +PHYSIO +PICS +PICTURES +PINK +PIZZA +PK +PL +PLACE +PLUMBING +PM +PN +POHL +POST +PR +PRAXI +PRESS +PRO +PROD +PRODUCTIONS +PROF +PROPERTIES +PROPERTY +PS +PT +PUB +PW +PY +QA +QPON +QUEBEC +RE +REALTOR +RECIPES +RED +REHAB +REISE +REISEN +REN +RENTALS +REPAIR +REPORT +REPUBLICAN +REST +RESTAURANT +REVIEWS +RICH +RIO +RO +ROCKS +RODEO +RS +RSVP +RU +RUHR +RW +RYUKYU +SA +SAARLAND +SARL +SB +SC +SCA +SCB +SCHMIDT +SCHULE +SCOT +SD +SE +SERVICES +SEXY +SG +SH +SHIKSHA +SHOES +SI +SINGLES +SJ +SK +SL +SM +SN +SO +SOCIAL +SOFTWARE +SOHU +SOLAR +SOLUTIONS +SOY +SPACE +SPIEGEL +SR +ST +SU +SUPPLIES +SUPPLY +SUPPORT +SURF +SURGERY +SUZUKI +SV +SX +SY +SYSTEMS +SZ +TATAR +TATTOO +TAX +TC +TD +TECHNOLOGY +TEL +TF +TG +TH +TIENDA +TIPS +TIROL +TJ +TK +TL +TM +TN +TO +TODAY +TOKYO +TOOLS +TOP +TOWN +TOYS +TP +TR +TRADE +TRAINING +TRAVEL +TT +TUI +TV +TW +TZ +UA +UG +UK +UNIVERSITY +UNO +UOL +US +UY +UZ +VA +VACATIONS +VC +VE +VEGAS +VENTURES +VERSICHERUNG +VET +VG +VI +VIAJES +VILLAS +VISION +VLAANDEREN +VN +VODKA +VOTE +VOTING +VOTO +VOYAGE +VU +WALES +WANG +WATCH +WEBCAM +WEBSITE +WED +WF +WHOSWHO +WIEN +WIKI +WILLIAMHILL +WME +WORK +WORKS +WORLD +WS +WTC +WTF +XN--1QQW23A +XN--3BST00M +XN--3DS443G +XN--3E0B707E +XN--45BRJ9C +XN--4GBRIM +XN--55QW42G +XN--55QX5D +XN--6FRZ82G +XN--6QQ986B3XL +XN--80ADXHKS +XN--80AO21A +XN--80ASEHDB 
+XN--80ASWG +XN--90A3AC +XN--C1AVG +XN--CG4BKI +XN--CLCHC0EA0B2G2A9GCD +XN--CZR694B +XN--CZRU2D +XN--D1ACJ3B +XN--FIQ228C5HS +XN--FIQ64B +XN--FIQS8S +XN--FIQZ9S +XN--FPCRJ9C3D +XN--FZC2C9E2C +XN--GECRJ9C +XN--H2BRJ9C +XN--I1B6B1A6A2E +XN--IO0A7I +XN--J1AMH +XN--J6W193G +XN--KPRW13D +XN--KPRY57D +XN--KPUT3I +XN--L1ACC +XN--LGBBAT1AD8J +XN--MGB9AWBF +XN--MGBA3A4F16A +XN--MGBAAM7A8H +XN--MGBAB2BD +XN--MGBAYH7GPA +XN--MGBBH1A71E +XN--MGBC0A9AZCG +XN--MGBERP4A5D4AR +XN--MGBX4CD0AB +XN--NGBC5AZD +XN--NQV7F +XN--NQV7FS00EMA +XN--O3CW4H +XN--OGBPF8FL +XN--P1ACF +XN--P1AI +XN--PGBS0DH +XN--Q9JYB4C +XN--RHQV96G +XN--S9BRJ9C +XN--SES554G +XN--UNUP4Y +XN--VERMGENSBERATER-CTB +XN--VERMGENSBERATUNG-PWB +XN--VHQUV +XN--WGBH1C +XN--WGBL6A +XN--XHQ521B +XN--XKC2AL3HYE2A +XN--XKC2DL3A5EE0H +XN--YFRO4I67O +XN--YGBI2AMMX +XN--ZFR164B +XXX +XYZ +YACHTS +YANDEX +YE +YOKOHAMA +YOUTUBE +YT +ZA +ZIP +ZM +ZONE +ZW diff --git a/Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds.go b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds.go new file mode 100644 index 00000000000..c4969dbc2b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-is-domain/tlds.go @@ -0,0 +1,737 @@ +package isdomain + +// TLDs is a set of TLDs, according to ICANN in 2014. 
+var TLDs = map[string]bool{ + "AC": true, + "ACADEMY": true, + "ACCOUNTANTS": true, + "ACTIVE": true, + "ACTOR": true, + "AD": true, + "AE": true, + "AERO": true, + "AF": true, + "AG": true, + "AGENCY": true, + "AI": true, + "AIRFORCE": true, + "AL": true, + "ALLFINANZ": true, + "AM": true, + "AN": true, + "AO": true, + "AQ": true, + "AR": true, + "ARCHI": true, + "ARMY": true, + "ARPA": true, + "AS": true, + "ASIA": true, + "ASSOCIATES": true, + "AT": true, + "ATTORNEY": true, + "AU": true, + "AUCTION": true, + "AUDIO": true, + "AUTOS": true, + "AW": true, + "AX": true, + "AXA": true, + "AZ": true, + "BA": true, + "BAR": true, + "BARGAINS": true, + "BAYERN": true, + "BB": true, + "BD": true, + "BE": true, + "BEER": true, + "BERLIN": true, + "BEST": true, + "BF": true, + "BG": true, + "BH": true, + "BI": true, + "BID": true, + "BIKE": true, + "BIO": true, + "BIZ": true, + "BJ": true, + "BLACK": true, + "BLACKFRIDAY": true, + "BLUE": true, + "BM": true, + "BMW": true, + "BN": true, + "BNPPARIBAS": true, + "BO": true, + "BOO": true, + "BOUTIQUE": true, + "BR": true, + "BRUSSELS": true, + "BS": true, + "BT": true, + "BUDAPEST": true, + "BUILD": true, + "BUILDERS": true, + "BUSINESS": true, + "BUZZ": true, + "BV": true, + "BW": true, + "BY": true, + "BZ": true, + "BZH": true, + "CA": true, + "CAB": true, + "CAL": true, + "CAMERA": true, + "CAMP": true, + "CANCERRESEARCH": true, + "CAPETOWN": true, + "CAPITAL": true, + "CARAVAN": true, + "CARDS": true, + "CARE": true, + "CAREER": true, + "CAREERS": true, + "CASA": true, + "CASH": true, + "CAT": true, + "CATERING": true, + "CC": true, + "CD": true, + "CENTER": true, + "CEO": true, + "CERN": true, + "CF": true, + "CG": true, + "CH": true, + "CHANNEL": true, + "CHEAP": true, + "CHRISTMAS": true, + "CHROME": true, + "CHURCH": true, + "CI": true, + "CITIC": true, + "CITY": true, + "CK": true, + "CL": true, + "CLAIMS": true, + "CLEANING": true, + "CLICK": true, + "CLINIC": true, + "CLOTHING": true, + "CLUB": true, + "CM": 
true, + "CN": true, + "CO": true, + "CODES": true, + "COFFEE": true, + "COLLEGE": true, + "COLOGNE": true, + "COM": true, + "COMMUNITY": true, + "COMPANY": true, + "COMPUTER": true, + "CONDOS": true, + "CONSTRUCTION": true, + "CONSULTING": true, + "CONTRACTORS": true, + "COOKING": true, + "COOL": true, + "COOP": true, + "COUNTRY": true, + "CR": true, + "CREDIT": true, + "CREDITCARD": true, + "CRUISES": true, + "CU": true, + "CUISINELLA": true, + "CV": true, + "CW": true, + "CX": true, + "CY": true, + "CYMRU": true, + "CZ": true, + "DAD": true, + "DANCE": true, + "DATING": true, + "DAY": true, + "DE": true, + "DEALS": true, + "DEGREE": true, + "DEMOCRAT": true, + "DENTAL": true, + "DENTIST": true, + "DESI": true, + "DIAMONDS": true, + "DIET": true, + "DIGITAL": true, + "DIRECT": true, + "DIRECTORY": true, + "DISCOUNT": true, + "DJ": true, + "DK": true, + "DM": true, + "DNP": true, + "DO": true, + "DOMAINS": true, + "DURBAN": true, + "DVAG": true, + "DZ": true, + "EAT": true, + "EC": true, + "EDU": true, + "EDUCATION": true, + "EE": true, + "EG": true, + "EMAIL": true, + "ENGINEER": true, + "ENGINEERING": true, + "ENTERPRISES": true, + "EQUIPMENT": true, + "ER": true, + "ES": true, + "ESQ": true, + "ESTATE": true, + "ET": true, + "EU": true, + "EUS": true, + "EVENTS": true, + "EXCHANGE": true, + "EXPERT": true, + "EXPOSED": true, + "FAIL": true, + "FARM": true, + "FEEDBACK": true, + "FI": true, + "FINANCE": true, + "FINANCIAL": true, + "FISH": true, + "FISHING": true, + "FITNESS": true, + "FJ": true, + "FK": true, + "FLIGHTS": true, + "FLORIST": true, + "FLY": true, + "FM": true, + "FO": true, + "FOO": true, + "FORSALE": true, + "FOUNDATION": true, + "FR": true, + "FRL": true, + "FROGANS": true, + "FUND": true, + "FURNITURE": true, + "FUTBOL": true, + "GA": true, + "GAL": true, + "GALLERY": true, + "GB": true, + "GBIZ": true, + "GD": true, + "GE": true, + "GENT": true, + "GF": true, + "GG": true, + "GH": true, + "GI": true, + "GIFT": true, + "GIFTS": true, + "GIVES": 
true, + "GL": true, + "GLASS": true, + "GLE": true, + "GLOBAL": true, + "GLOBO": true, + "GM": true, + "GMAIL": true, + "GMO": true, + "GMX": true, + "GN": true, + "GOOGLE": true, + "GOP": true, + "GOV": true, + "GP": true, + "GQ": true, + "GR": true, + "GRAPHICS": true, + "GRATIS": true, + "GREEN": true, + "GRIPE": true, + "GS": true, + "GT": true, + "GU": true, + "GUIDE": true, + "GUITARS": true, + "GURU": true, + "GW": true, + "GY": true, + "HAMBURG": true, + "HAUS": true, + "HEALTHCARE": true, + "HELP": true, + "HERE": true, + "HIPHOP": true, + "HIV": true, + "HK": true, + "HM": true, + "HN": true, + "HOLDINGS": true, + "HOLIDAY": true, + "HOMES": true, + "HORSE": true, + "HOST": true, + "HOSTING": true, + "HOUSE": true, + "HOW": true, + "HR": true, + "HT": true, + "HU": true, + "IBM": true, + "ID": true, + "IE": true, + "IL": true, + "IM": true, + "IMMO": true, + "IMMOBILIEN": true, + "IN": true, + "INDUSTRIES": true, + "INFO": true, + "ING": true, + "INK": true, + "INSTITUTE": true, + "INSURE": true, + "INT": true, + "INTERNATIONAL": true, + "INVESTMENTS": true, + "IO": true, + "IQ": true, + "IR": true, + "IS": true, + "IT": true, + "JE": true, + "JETZT": true, + "JM": true, + "JO": true, + "JOBS": true, + "JOBURG": true, + "JP": true, + "JUEGOS": true, + "KAUFEN": true, + "KE": true, + "KG": true, + "KH": true, + "KI": true, + "KIM": true, + "KITCHEN": true, + "KIWI": true, + "KM": true, + "KN": true, + "KOELN": true, + "KP": true, + "KR": true, + "KRD": true, + "KRED": true, + "KW": true, + "KY": true, + "KZ": true, + "LA": true, + "LACAIXA": true, + "LAND": true, + "LAWYER": true, + "LB": true, + "LC": true, + "LEASE": true, + "LGBT": true, + "LI": true, + "LIFE": true, + "LIGHTING": true, + "LIMITED": true, + "LIMO": true, + "LINK": true, + "LK": true, + "LOANS": true, + "LONDON": true, + "LOTTO": true, + "LR": true, + "LS": true, + "LT": true, + "LTDA": true, + "LU": true, + "LUXE": true, + "LUXURY": true, + "LV": true, + "LY": true, + "MA": true, + 
"MAISON": true, + "MANAGEMENT": true, + "MANGO": true, + "MARKET": true, + "MARKETING": true, + "MC": true, + "MD": true, + "ME": true, + "MEDIA": true, + "MEET": true, + "MELBOURNE": true, + "MEME": true, + "MENU": true, + "MG": true, + "MH": true, + "MIAMI": true, + "MIL": true, + "MINI": true, + "MK": true, + "ML": true, + "MM": true, + "MN": true, + "MO": true, + "MOBI": true, + "MODA": true, + "MOE": true, + "MONASH": true, + "MORTGAGE": true, + "MOSCOW": true, + "MOTORCYCLES": true, + "MOV": true, + "MP": true, + "MQ": true, + "MR": true, + "MS": true, + "MT": true, + "MU": true, + "MUSEUM": true, + "MV": true, + "MW": true, + "MX": true, + "MY": true, + "MZ": true, + "NA": true, + "NAGOYA": true, + "NAME": true, + "NAVY": true, + "NC": true, + "NE": true, + "NET": true, + "NETWORK": true, + "NEUSTAR": true, + "NEW": true, + "NEXUS": true, + "NF": true, + "NG": true, + "NGO": true, + "NHK": true, + "NI": true, + "NINJA": true, + "NL": true, + "NO": true, + "NP": true, + "NR": true, + "NRA": true, + "NRW": true, + "NU": true, + "NYC": true, + "NZ": true, + "OKINAWA": true, + "OM": true, + "ONG": true, + "ONL": true, + "OOO": true, + "ORG": true, + "ORGANIC": true, + "OTSUKA": true, + "OVH": true, + "PA": true, + "PARIS": true, + "PARTNERS": true, + "PARTS": true, + "PE": true, + "PF": true, + "PG": true, + "PH": true, + "PHARMACY": true, + "PHOTO": true, + "PHOTOGRAPHY": true, + "PHOTOS": true, + "PHYSIO": true, + "PICS": true, + "PICTURES": true, + "PINK": true, + "PIZZA": true, + "PK": true, + "PL": true, + "PLACE": true, + "PLUMBING": true, + "PM": true, + "PN": true, + "POHL": true, + "POST": true, + "PR": true, + "PRAXI": true, + "PRESS": true, + "PRO": true, + "PROD": true, + "PRODUCTIONS": true, + "PROF": true, + "PROPERTIES": true, + "PROPERTY": true, + "PS": true, + "PT": true, + "PUB": true, + "PW": true, + "PY": true, + "QA": true, + "QPON": true, + "QUEBEC": true, + "RE": true, + "REALTOR": true, + "RECIPES": true, + "RED": true, + "REHAB": true, + 
"REISE": true, + "REISEN": true, + "REN": true, + "RENTALS": true, + "REPAIR": true, + "REPORT": true, + "REPUBLICAN": true, + "REST": true, + "RESTAURANT": true, + "REVIEWS": true, + "RICH": true, + "RIO": true, + "RO": true, + "ROCKS": true, + "RODEO": true, + "RS": true, + "RSVP": true, + "RU": true, + "RUHR": true, + "RW": true, + "RYUKYU": true, + "SA": true, + "SAARLAND": true, + "SARL": true, + "SB": true, + "SC": true, + "SCA": true, + "SCB": true, + "SCHMIDT": true, + "SCHULE": true, + "SCOT": true, + "SD": true, + "SE": true, + "SERVICES": true, + "SEXY": true, + "SG": true, + "SH": true, + "SHIKSHA": true, + "SHOES": true, + "SI": true, + "SINGLES": true, + "SJ": true, + "SK": true, + "SL": true, + "SM": true, + "SN": true, + "SO": true, + "SOCIAL": true, + "SOFTWARE": true, + "SOHU": true, + "SOLAR": true, + "SOLUTIONS": true, + "SOY": true, + "SPACE": true, + "SPIEGEL": true, + "SR": true, + "ST": true, + "SU": true, + "SUPPLIES": true, + "SUPPLY": true, + "SUPPORT": true, + "SURF": true, + "SURGERY": true, + "SUZUKI": true, + "SV": true, + "SX": true, + "SY": true, + "SYSTEMS": true, + "SZ": true, + "TATAR": true, + "TATTOO": true, + "TAX": true, + "TC": true, + "TD": true, + "TECHNOLOGY": true, + "TEL": true, + "TF": true, + "TG": true, + "TH": true, + "TIENDA": true, + "TIPS": true, + "TIROL": true, + "TJ": true, + "TK": true, + "TL": true, + "TM": true, + "TN": true, + "TO": true, + "TODAY": true, + "TOKYO": true, + "TOOLS": true, + "TOP": true, + "TOWN": true, + "TOYS": true, + "TP": true, + "TR": true, + "TRADE": true, + "TRAINING": true, + "TRAVEL": true, + "TT": true, + "TUI": true, + "TV": true, + "TW": true, + "TZ": true, + "UA": true, + "UG": true, + "UK": true, + "UNIVERSITY": true, + "UNO": true, + "UOL": true, + "US": true, + "UY": true, + "UZ": true, + "VA": true, + "VACATIONS": true, + "VC": true, + "VE": true, + "VEGAS": true, + "VENTURES": true, + "VERSICHERUNG": true, + "VET": true, + "VG": true, + "VI": true, + "VIAJES": true, + 
"VILLAS": true, + "VISION": true, + "VLAANDEREN": true, + "VN": true, + "VODKA": true, + "VOTE": true, + "VOTING": true, + "VOTO": true, + "VOYAGE": true, + "VU": true, + "WALES": true, + "WANG": true, + "WATCH": true, + "WEBCAM": true, + "WEBSITE": true, + "WED": true, + "WF": true, + "WHOSWHO": true, + "WIEN": true, + "WIKI": true, + "WILLIAMHILL": true, + "WME": true, + "WORK": true, + "WORKS": true, + "WORLD": true, + "WS": true, + "WTC": true, + "WTF": true, + "XN--1QQW23A": true, + "XN--3BST00M": true, + "XN--3DS443G": true, + "XN--3E0B707E": true, + "XN--45BRJ9C": true, + "XN--4GBRIM": true, + "XN--55QW42G": true, + "XN--55QX5D": true, + "XN--6FRZ82G": true, + "XN--6QQ986B3XL": true, + "XN--80ADXHKS": true, + "XN--80AO21A": true, + "XN--80ASEHDB": true, + "XN--80ASWG": true, + "XN--90A3AC": true, + "XN--C1AVG": true, + "XN--CG4BKI": true, + "XN--CLCHC0EA0B2G2A9GCD": true, + "XN--CZR694B": true, + "XN--CZRU2D": true, + "XN--D1ACJ3B": true, + "XN--FIQ228C5HS": true, + "XN--FIQ64B": true, + "XN--FIQS8S": true, + "XN--FIQZ9S": true, + "XN--FPCRJ9C3D": true, + "XN--FZC2C9E2C": true, + "XN--GECRJ9C": true, + "XN--H2BRJ9C": true, + "XN--I1B6B1A6A2E": true, + "XN--IO0A7I": true, + "XN--J1AMH": true, + "XN--J6W193G": true, + "XN--KPRW13D": true, + "XN--KPRY57D": true, + "XN--KPUT3I": true, + "XN--L1ACC": true, + "XN--LGBBAT1AD8J": true, + "XN--MGB9AWBF": true, + "XN--MGBA3A4F16A": true, + "XN--MGBAAM7A8H": true, + "XN--MGBAB2BD": true, + "XN--MGBAYH7GPA": true, + "XN--MGBBH1A71E": true, + "XN--MGBC0A9AZCG": true, + "XN--MGBERP4A5D4AR": true, + "XN--MGBX4CD0AB": true, + "XN--NGBC5AZD": true, + "XN--NQV7F": true, + "XN--NQV7FS00EMA": true, + "XN--O3CW4H": true, + "XN--OGBPF8FL": true, + "XN--P1ACF": true, + "XN--P1AI": true, + "XN--PGBS0DH": true, + "XN--Q9JYB4C": true, + "XN--RHQV96G": true, + "XN--S9BRJ9C": true, + "XN--SES554G": true, + "XN--UNUP4Y": true, + "XN--VERMGENSBERATER-CTB": true, + "XN--VERMGENSBERATUNG-PWB": true, + "XN--VHQUV": true, + "XN--WGBH1C": 
true, + "XN--WGBL6A": true, + "XN--XHQ521B": true, + "XN--XKC2AL3HYE2A": true, + "XN--XKC2DL3A5EE0H": true, + "XN--YFRO4I67O": true, + "XN--YGBI2AMMX": true, + "XN--ZFR164B": true, + "XXX": true, + "XYZ": true, + "YACHTS": true, + "YANDEX": true, + "YE": true, + "YOKOHAMA": true, + "YOUTUBE": true, + "YT": true, + "ZA": true, + "ZIP": true, + "ZM": true, + "ZONE": true, + "ZW": true, +} + +// ExtendedTLDs is a set of additional "TLDs", allowing decentralized name +// systems, like TOR and Namecoin. +var ExtendedTLDs = map[string]bool{ + "BIT": true, + "ONION": true, +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/LICENSE b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/LICENSE new file mode 100644 index 00000000000..c7386b3c940 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md index b63ffa2dad0..7ff9854f18f 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/README.md @@ -7,18 +7,20 @@ ### Simple ```go -import "github.com/jbenet/go-multiaddr" +import ma "github.com/jbenet/go-multiaddr" -m := multiaddr.NewMultiaddr("/ip4/127.0.0.1/udp/1234") -// -m.buffer -// -m.String() -// /ip4/127.0.0.1/udp/1234 +// construct from a string (err signals parse failure) +m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234") -// construct with Buffer -m = multiaddr.Multiaddr{ Bytes: m.Bytes } -// +// construct from bytes (err signals parse failure) +m2, err := ma.NewMultiaddrBytes(m1.Bytes()) + +// true +strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234") +strings.Equal(m1.String(), m2.String()) +bytes.Equal(m1.Bytes(), m2.Bytes()) +m1.Equal(m2) +m2.Equal(m1) ``` ### Protocols @@ -36,7 +38,7 @@ addr.Protocols() ```go // handles the stupid url version too -m = multiaddr.NewUrl("udp4://127.0.0.1:1234") +m = ma.NewUrl("udp4://127.0.0.1:1234") // m.Url(buf) // udp4://127.0.0.1:1234 @@ -45,9 +47,9 @@ m.Url(buf) ### En/decapsulate ```go -m.Encapsulate(m.NewMultiaddr("/sctp/5678")) +m.Encapsulate(ma.NewMultiaddr("/sctp/5678")) // -m.Decapsulate(m.NewMultiaddr("/udp")) // up to + inc last occurrence of subaddr +m.Decapsulate(ma.NewMultiaddr("/udp")) // up to + inc last occurrence of subaddr // ``` @@ -56,11 +58,11 @@ m.Decapsulate(m.NewMultiaddr("/udp")) // up to + inc last occurrence of subaddr Multiaddr allows expressing tunnels very nicely. 
```js -printer := multiaddr.NewMultiaddr("/ip4/192.168.0.13/tcp/80") -proxy := multiaddr.NewMultiaddr("/ip4/10.20.30.40/tcp/443") +printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80") +proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443") printerOverProxy := proxy.Encapsulate(printer) -// +// /ip4/10.20.30.40/tcp/443/ip4/192.168.0.13/tcp/80 -proxyAgain := printerOverProxy.Decapsulate(multiaddr.NewMultiaddr("/ip4")) -// +proxyAgain := printerOverProxy.Decapsulate(printer) +// /ip4/10.20.30.40/tcp/443 ``` diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/doc.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/doc.go new file mode 100644 index 00000000000..c26e443cbd2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/doc.go @@ -0,0 +1,36 @@ +/* +Package multiaddr provides an implementation of the Multiaddr network +address format. Multiaddr emphasizes explicitness, self-description, and +portability. It allows applications to treat addresses as opaque tokens, +and to avoid making assumptions about the address representation (e.g. length). 
+Learn more at https://github.com/jbenet/multiaddr + +Basic Use: + + import ( + "bytes" + "strings" + ma "github.com/jbenet/go-multiaddr" + ) + + // construct from a string (err signals parse failure) + m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234") + + // construct from bytes (err signals parse failure) + m2, err := ma.NewMultiaddrBytes(m1.Bytes()) + + // true + strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234") + strings.Equal(m1.String(), m2.String()) + bytes.Equal(m1.Bytes(), m2.Bytes()) + m1.Equal(m2) + m2.Equal(m1) + + // tunneling (en/decap) + printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80") + proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443") + printerOverProxy := proxy.Encapsulate(printer) + proxyAgain := printerOverProxy.Decapsulate(printer) + +*/ +package multiaddr diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go deleted file mode 100644 index a55fa669ec7..00000000000 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go +++ /dev/null @@ -1,117 +0,0 @@ -package multiaddr - -import ( - "bytes" - "fmt" - "strings" -) - -// Multiaddr is the data structure representing a multiaddr -type Multiaddr struct { - Bytes []byte -} - -// NewMultiaddr parses and validates an input string, returning a *Multiaddr -func NewMultiaddr(s string) (*Multiaddr, error) { - b, err := stringToBytes(s) - if err != nil { - return nil, err - } - return &Multiaddr{Bytes: b}, nil -} - -// Equal tests whether two multiaddrs are equal -func (m *Multiaddr) Equal(m2 *Multiaddr) bool { - return bytes.Equal(m.Bytes, m2.Bytes) -} - -// String returns the string representation of a Multiaddr -func (m *Multiaddr) String() (string, error) { - return bytesToString(m.Bytes) -} - -// Protocols returns the list of protocols this Multiaddr has. -func (m *Multiaddr) Protocols() (ret []*Protocol, err error) { - - // panic handler, in case we try accessing bytes incorrectly. 
- defer func() { - if e := recover(); e != nil { - ret = nil - err = e.(error) - } - }() - - ps := []*Protocol{} - b := m.Bytes[:] - for len(b) > 0 { - p := ProtocolWithCode(int(b[0])) - if p == nil { - return nil, fmt.Errorf("no protocol with code %d", b[0]) - } - ps = append(ps, p) - b = b[1+(p.Size/8):] - } - return ps, nil -} - -// Encapsulate wraps a given Multiaddr, returning the resulting joined Multiaddr -func (m *Multiaddr) Encapsulate(o *Multiaddr) *Multiaddr { - b := make([]byte, len(m.Bytes)+len(o.Bytes)) - b = append(m.Bytes, o.Bytes...) - return &Multiaddr{Bytes: b} -} - -// Decapsulate unwraps Multiaddr up until the given Multiaddr is found. -func (m *Multiaddr) Decapsulate(o *Multiaddr) (*Multiaddr, error) { - s1, err := m.String() - if err != nil { - return nil, err - } - - s2, err := o.String() - if err != nil { - return nil, err - } - - i := strings.LastIndex(s1, s2) - if i < 0 { - return nil, fmt.Errorf("%s not contained in %s", s2, s1) - } - return NewMultiaddr(s1[:i]) -} - -// DialArgs is a convenience function returning arguments for use in net.Dial -func (m *Multiaddr) DialArgs() (string, string, error) { - if !m.IsThinWaist() { - return "", "", fmt.Errorf("%s is not a 'thin waist' address", m) - } - - str, err := m.String() - if err != nil { - return "", "", err - } - - parts := strings.Split(str, "/")[1:] - network := parts[2] - host := strings.Join([]string{parts[1], parts[3]}, ":") - return network, host, nil -} - -// IsThinWaist returns whether this multiaddr includes "Thin Waist" Protocols. 
-// This means: /{IP4, IP6}/{TCP, UDP} -func (m *Multiaddr) IsThinWaist() bool { - p, err := m.Protocols() - if err != nil { - return false - } - - if p[0].Code != P_IP4 && p[0].Code != P_IP6 { - return false - } - - if p[1].Code != P_TCP && p[1].Code != P_UDP { - return false - } - - return true -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/interface.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/interface.go new file mode 100644 index 00000000000..6f57625a3c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/interface.go @@ -0,0 +1,42 @@ +package multiaddr + +/* +Multiaddr is a cross-protocol, cross-platform format for representing +internet addresses. It emphasizes explicitness and self-description. +Learn more here: https://github.com/jbenet/multiaddr + +Multiaddrs have both a binary and string representation. + + import ma "github.com/jbenet/go-multiaddr" + + addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80") + // err non-nil when parsing failed. + +*/ +type Multiaddr interface { + // Equal returns whether two Multiaddrs are exactly equal + Equal(Multiaddr) bool + + // Bytes returns the []byte representation of this Multiaddr + Bytes() []byte + + // String returns the string representation of this Multiaddr + // (may panic if internal state is corrupted) + String() string + + // Protocols returns the list of Protocols this Multiaddr includes + // will panic if protocol code incorrect (and bytes accessed incorrectly) + Protocols() []*Protocol + + // Encapsulate wraps this Multiaddr around another. For example: + // + // /ip4/1.2.3.4 encapsulate /tcp/80 = /ip4/1.2.3.4/tcp/80 + // + Encapsulate(Multiaddr) Multiaddr + + // Decapsultate removes a Multiaddr wrapping. 
For example: + // + // /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = /tcp/80 + // + Decapsulate(Multiaddr) Multiaddr +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr.go new file mode 100644 index 00000000000..4ee63ca43bb --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr.go @@ -0,0 +1,110 @@ +package multiaddr + +import ( + "bytes" + "fmt" + "strings" +) + +// multiaddr is the data structure representing a Multiaddr +type multiaddr struct { + bytes []byte +} + +// NewMultiaddr parses and validates an input string, returning a *Multiaddr +func NewMultiaddr(s string) (Multiaddr, error) { + b, err := stringToBytes(s) + if err != nil { + return nil, err + } + return &multiaddr{bytes: b}, nil +} + +// NewMultiaddrBytes initializes a Multiaddr from a byte representation. +// It validates it as an input string. +func NewMultiaddrBytes(b []byte) (Multiaddr, error) { + s, err := bytesToString(b) + if err != nil { + return nil, err + } + return NewMultiaddr(s) +} + +// Equal tests whether two multiaddrs are equal +func (m *multiaddr) Equal(m2 Multiaddr) bool { + return bytes.Equal(m.bytes, m2.Bytes()) +} + +// Bytes returns the []byte representation of this Multiaddr +func (m *multiaddr) Bytes() []byte { + // consider returning copy to prevent changing underneath us? + cpy := make([]byte, len(m.bytes)) + copy(cpy, m.bytes) + return cpy +} + +// String returns the string representation of a Multiaddr +func (m *multiaddr) String() string { + s, err := bytesToString(m.bytes) + if err != nil { + panic("multiaddr failed to convert back to string. corrupted?") + } + return s +} + +// Protocols returns the list of protocols this Multiaddr has. +// will panic in case we access bytes incorrectly. +func (m *multiaddr) Protocols() []*Protocol { + + // panic handler, in case we try accessing bytes incorrectly. 
+ defer func() { + if e := recover(); e != nil { + err := e.(error) + panic("Multiaddr.Protocols error: " + err.Error()) + } + }() + + ps := []*Protocol{} + b := m.bytes[:] + for len(b) > 0 { + p := ProtocolWithCode(int(b[0])) + if p == nil { + // this is a panic (and not returning err) because this should've been + // caught on constructing the Multiaddr + panic(fmt.Errorf("no protocol with code %d", b[0])) + } + ps = append(ps, p) + b = b[1+(p.Size/8):] + } + return ps +} + +// Encapsulate wraps a given Multiaddr, returning the resulting joined Multiaddr +func (m *multiaddr) Encapsulate(o Multiaddr) Multiaddr { + mb := m.bytes + ob := o.Bytes() + + var b bytes.Buffer + b.Write(mb) + b.Write(ob) + return &multiaddr{bytes: b.Bytes()} +} + +// Decapsulate unwraps Multiaddr up until the given Multiaddr is found. +func (m *multiaddr) Decapsulate(o Multiaddr) Multiaddr { + s1 := m.String() + s2 := o.String() + i := strings.LastIndex(s1, s2) + if i < 0 { + // if multiaddr not contained, returns a copy. 
+ cpy := make([]byte, len(m.bytes)) + copy(cpy, m.bytes) + return &multiaddr{bytes: cpy} + } + + ma, err := NewMultiaddr(s1[:i]) + if err != nil { + panic("Multiaddr.Decapsulate incorrect byte boundaries.") + } + return ma +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go index 7bc2e92bfce..12d6214630e 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -func newMultiaddr(t *testing.T, a string) *Multiaddr { +func newMultiaddr(t *testing.T, a string) Multiaddr { m, err := NewMultiaddr(a) if err != nil { t.Error(err) @@ -88,11 +88,7 @@ func TestProtocols(t *testing.T) { t.Error("failed to construct", "/ip4/127.0.0.1/udp/1234") } - ps, err := m.Protocols() - if err != nil { - t.Error("failed to get protocols", "/ip4/127.0.0.1/udp/1234") - } - + ps := m.Protocols() if ps[0] != ProtocolWithName("ip4") { t.Error(ps[0], ProtocolWithName("ip4")) t.Error("failed to get ip4 protocol") @@ -117,47 +113,19 @@ func TestEncapsulate(t *testing.T) { } b := m.Encapsulate(m2) - if s, _ := b.String(); s != "/ip4/127.0.0.1/udp/1234/udp/5678" { + if s := b.String(); s != "/ip4/127.0.0.1/udp/1234/udp/5678" { t.Error("encapsulate /ip4/127.0.0.1/udp/1234/udp/5678 failed.", s) } m3, _ := NewMultiaddr("/udp/5678") - c, err := b.Decapsulate(m3) - if err != nil { - t.Error("decapsulate /udp failed.", err) - } - - if s, _ := c.String(); s != "/ip4/127.0.0.1/udp/1234" { + c := b.Decapsulate(m3) + if s := c.String(); s != "/ip4/127.0.0.1/udp/1234" { t.Error("decapsulate /udp failed.", "/ip4/127.0.0.1/udp/1234", s) } m4, _ := NewMultiaddr("/ip4/127.0.0.1") - d, err := c.Decapsulate(m4) - if err != nil { - t.Error("decapsulate /ip4 failed.", err) - } - - if s, _ := d.String(); s != "" { + d := c.Decapsulate(m4) + if s := d.String(); s != "" { 
t.Error("decapsulate /ip4 failed.", "/", s) } } - -func TestDialArgs(t *testing.T) { - m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234") - if err != nil { - t.Fatal("failed to construct", "/ip4/127.0.0.1/udp/1234") - } - - nw, host, err := m.DialArgs() - if err != nil { - t.Fatal("failed to get dial args", "/ip4/127.0.0.1/udp/1234", err) - } - - if nw != "udp" { - t.Error("failed to get udp network Dial Arg") - } - - if host != "127.0.0.1:1234" { - t.Error("failed to get host:port Dial Arg") - } -} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go index 516fe8392f2..ed91dc2e14b 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go @@ -3,12 +3,13 @@ package multiaddr import ( "fmt" "net" + "strings" ) var errIncorrectNetAddr = fmt.Errorf("incorrect network addr conversion") // FromNetAddr converts a net.Addr type to a Multiaddr. -func FromNetAddr(a net.Addr) (*Multiaddr, error) { +func FromNetAddr(a net.Addr) (Multiaddr, error) { switch a.Network() { case "tcp", "tcp4", "tcp6": ac, ok := a.(*net.TCPAddr) @@ -65,7 +66,7 @@ func FromNetAddr(a net.Addr) (*Multiaddr, error) { } // FromIP converts a net.IP type to a Multiaddr. 
-func FromIP(ip net.IP) (*Multiaddr, error) { +func FromIP(ip net.IP) (Multiaddr, error) { switch { case ip.To4() != nil: return NewMultiaddr("/ip4/" + ip.String()) @@ -75,3 +76,38 @@ func FromIP(ip net.IP) (*Multiaddr, error) { return nil, errIncorrectNetAddr } } + +// DialArgs is a convenience function returning arguments for use in net.Dial +func DialArgs(m Multiaddr) (string, string, error) { + if !IsThinWaist(m) { + return "", "", fmt.Errorf("%s is not a 'thin waist' address", m) + } + + str := m.String() + parts := strings.Split(str, "/")[1:] + network := parts[2] + + var host string + switch parts[0] { + case "ip4": + host = strings.Join([]string{parts[1], parts[3]}, ":") + case "ip6": + host = fmt.Sprintf("[%s]:%s", parts[1], parts[3]) + } + return network, host, nil +} + +// IsThinWaist returns whether a Multiaddr starts with "Thin Waist" Protocols. +// This means: /{IP4, IP6}/{TCP, UDP} +func IsThinWaist(m Multiaddr) bool { + p := m.Protocols() + if p[0].Code != P_IP4 && p[0].Code != P_IP6 { + return false + } + + if p[1].Code != P_TCP && p[1].Code != P_UDP { + return false + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go index fd1ede1f1c3..c9cb4b4ac8f 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -type GenFunc func() (*Multiaddr, error) +type GenFunc func() (Multiaddr, error) func testConvert(t *testing.T, s string, gen GenFunc) { m, err := gen() @@ -13,25 +13,25 @@ func testConvert(t *testing.T, s string, gen GenFunc) { t.Fatal("failed to generate.") } - if s2, _ := m.String(); err != nil || s2 != s { + if s2 := m.String(); err != nil || s2 != s { t.Fatal("failed to convert: " + s + " != " + s2) } } func TestFromIP4(t *testing.T) { - testConvert(t, "/ip4/10.20.30.40", func() (*Multiaddr, error) { + 
testConvert(t, "/ip4/10.20.30.40", func() (Multiaddr, error) { return FromIP(net.ParseIP("10.20.30.40")) }) } func TestFromIP6(t *testing.T) { - testConvert(t, "/ip6/2001:4860:0:2001::68", func() (*Multiaddr, error) { + testConvert(t, "/ip6/2001:4860:0:2001::68", func() (Multiaddr, error) { return FromIP(net.ParseIP("2001:4860:0:2001::68")) }) } func TestFromTCP(t *testing.T) { - testConvert(t, "/ip4/10.20.30.40/tcp/1234", func() (*Multiaddr, error) { + testConvert(t, "/ip4/10.20.30.40/tcp/1234", func() (Multiaddr, error) { return FromNetAddr(&net.TCPAddr{ IP: net.ParseIP("10.20.30.40"), Port: 1234, @@ -40,10 +40,30 @@ func TestFromTCP(t *testing.T) { } func TestFromUDP(t *testing.T) { - testConvert(t, "/ip4/10.20.30.40/udp/1234", func() (*Multiaddr, error) { + testConvert(t, "/ip4/10.20.30.40/udp/1234", func() (Multiaddr, error) { return FromNetAddr(&net.UDPAddr{ IP: net.ParseIP("10.20.30.40"), Port: 1234, }) }) } + +func TestDialArgs(t *testing.T) { + m, err := NewMultiaddr("/ip4/127.0.0.1/udp/1234") + if err != nil { + t.Fatal("failed to construct", "/ip4/127.0.0.1/udp/1234") + } + + nw, host, err := DialArgs(m) + if err != nil { + t.Fatal("failed to get dial args", "/ip4/127.0.0.1/udp/1234", err) + } + + if nw != "udp" { + t.Error("failed to get udp network Dial Arg") + } + + if host != "127.0.0.1:1234" { + t.Error("failed to get host:port Dial Arg") + } +} diff --git a/blocks/blocks.go b/blocks/blocks.go index c58ab9f209e..696c774ab4d 100644 --- a/blocks/blocks.go +++ b/blocks/blocks.go @@ -12,12 +12,8 @@ type Block struct { } // NewBlock creates a Block object from opaque data. It will hash the data. -func NewBlock(data []byte) (*Block, error) { - h, err := u.Hash(data) - if err != nil { - return nil, err - } - return &Block{Data: data, Multihash: h}, nil +func NewBlock(data []byte) *Block { + return &Block{Data: data, Multihash: u.Hash(data)} } // Key returns the block's Multihash as a Key value. 
diff --git a/blocks/blocks_test.go b/blocks/blocks_test.go index 915d84c023f..53a85227575 100644 --- a/blocks/blocks_test.go +++ b/blocks/blocks_test.go @@ -6,20 +6,11 @@ func TestBlocksBasic(t *testing.T) { // Test empty data empty := []byte{} - _, err := NewBlock(empty) - if err != nil { - t.Fatal(err) - } + NewBlock(empty) // Test nil case - _, err = NewBlock(nil) - if err != nil { - t.Fatal(err) - } + NewBlock(nil) // Test some data - _, err = NewBlock([]byte("Hello world!")) - if err != nil { - t.Fatal(err) - } + NewBlock([]byte("Hello world!")) } diff --git a/blockservice/blocks_test.go b/blockservice/blocks_test.go index c610fbd2a17..764d2d400eb 100644 --- a/blockservice/blocks_test.go +++ b/blockservice/blocks_test.go @@ -17,18 +17,8 @@ func TestBlocks(t *testing.T) { return } - b, err := blocks.NewBlock([]byte("beep boop")) - if err != nil { - t.Error("failed to construct block", err) - return - } - - h, err := u.Hash([]byte("beep boop")) - if err != nil { - t.Error("failed to hash data", err) - return - } - + b := blocks.NewBlock([]byte("beep boop")) + h := u.Hash([]byte("beep boop")) if !bytes.Equal(b.Multihash, h) { t.Error("Block Multihash and data multihash not equal") } diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 1fbbfcb4405..dcf15ce95b9 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -13,6 +13,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("blockservice") + // BlockService is a block datastore. // It uses an internal `datastore.Datastore` instance to store values. 
type BlockService struct { @@ -26,7 +28,7 @@ func NewBlockService(d ds.Datastore, rem exchange.Interface) (*BlockService, err return nil, fmt.Errorf("BlockService requires valid datastore") } if rem == nil { - u.DErr("Caution: blockservice running in local (offline) mode.\n") + log.Warning("blockservice running in local (offline) mode.") } return &BlockService{Datastore: d, Remote: rem}, nil } @@ -34,11 +36,10 @@ func NewBlockService(d ds.Datastore, rem exchange.Interface) (*BlockService, err // AddBlock adds a particular block to the service, Putting it into the datastore. func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) { k := b.Key() - dsk := ds.NewKey(string(k)) - u.DOut("storing [%s] in datastore\n", k.Pretty()) + log.Debug("blockservice: storing [%s] in datastore", k) // TODO(brian): define a block datastore with a Put method which accepts a // block parameter - err := s.Datastore.Put(dsk, b.Data) + err := s.Datastore.Put(k.DsKey(), b.Data) if err != nil { return k, err } @@ -52,11 +53,10 @@ func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) { // GetBlock retrieves a particular block from the service, // Getting it from the datastore using the key (hash). 
func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) { - u.DOut("BlockService GetBlock: '%s'\n", k.Pretty()) - dsk := ds.NewKey(string(k)) - datai, err := s.Datastore.Get(dsk) + log.Debug("BlockService GetBlock: '%s'", k) + datai, err := s.Datastore.Get(k.DsKey()) if err == nil { - u.DOut("Blockservice: Got data in datastore.\n") + log.Debug("Blockservice: Got data in datastore.") bdata, ok := datai.([]byte) if !ok { return nil, fmt.Errorf("data associated with %s is not a []byte", k) @@ -66,7 +66,7 @@ func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) { Data: bdata, }, nil } else if err == ds.ErrNotFound && s.Remote != nil { - u.DOut("Blockservice: Searching bitswap.\n") + log.Debug("Blockservice: Searching bitswap.") ctx, _ := context.WithTimeout(context.TODO(), 5*time.Second) blk, err := s.Remote.Block(ctx, k) if err != nil { @@ -74,7 +74,7 @@ func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) { } return blk, nil } else { - u.DOut("Blockservice GetBlock: Not found.\n") + log.Debug("Blockservice GetBlock: Not found.") return nil, u.ErrNotFound } } diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index a4fc1f65d85..6a0d10b74c8 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -13,7 +13,7 @@ var ValueTypeMismatch = errors.New("The retrieved value is not a Block") type Blockstore interface { Get(u.Key) (*blocks.Block, error) - Put(blocks.Block) error + Put(*blocks.Block) error } func NewBlockstore(d ds.Datastore) Blockstore { @@ -27,7 +27,7 @@ type blockstore struct { } func (bs *blockstore) Get(k u.Key) (*blocks.Block, error) { - maybeData, err := bs.datastore.Get(toDatastoreKey(k)) + maybeData, err := bs.datastore.Get(k.DsKey()) if err != nil { return nil, err } @@ -35,13 +35,9 @@ func (bs *blockstore) Get(k u.Key) (*blocks.Block, error) { if !ok { return nil, ValueTypeMismatch } - return blocks.NewBlock(bdata) + return blocks.NewBlock(bdata), nil } -func (bs *blockstore) Put(block 
blocks.Block) error { - return bs.datastore.Put(toDatastoreKey(block.Key()), block.Data) -} - -func toDatastoreKey(k u.Key) ds.Key { - return ds.NewKey(string(k)) +func (bs *blockstore) Put(block *blocks.Block) error { + return bs.datastore.Put(block.Key().DsKey(), block.Data) } diff --git a/blockstore/blockstore_test.go b/blockstore/blockstore_test.go index 4b0909d7547..98c00a9faba 100644 --- a/blockstore/blockstore_test.go +++ b/blockstore/blockstore_test.go @@ -5,8 +5,8 @@ import ( "testing" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) // TODO(brian): TestGetReturnsNil @@ -24,7 +24,7 @@ func TestGetWhenKeyNotPresent(t *testing.T) { func TestPutThenGetBlock(t *testing.T) { bs := NewBlockstore(ds.NewMapDatastore()) - block := testutil.NewBlockOrFail(t, "some data") + block := blocks.NewBlock([]byte("some data")) err := bs.Put(block) if err != nil { @@ -41,10 +41,10 @@ func TestPutThenGetBlock(t *testing.T) { } func TestValueTypeMismatch(t *testing.T) { - block := testutil.NewBlockOrFail(t, "some data") + block := blocks.NewBlock([]byte("some data")) datastore := ds.NewMapDatastore() - datastore.Put(toDatastoreKey(block.Key()), "data that isn't a block!") + datastore.Put(block.Key().DsKey(), "data that isn't a block!") blockstore := NewBlockstore(datastore) diff --git a/cmd/ipfs/add.go b/cmd/ipfs/add.go index 91eb1e6e4c6..5fbe7dd241b 100644 --- a/cmd/ipfs/add.go +++ b/cmd/ipfs/add.go @@ -29,4 +29,9 @@ func init() { cmdIpfsAdd.Flag.Bool("r", false, "add objects recursively") } -var addCmd = MakeCommand("add", []string{"r"}, commands.Add) +var addCmd = makeCommand(command{ + name: "add", + args: 1, + flags: []string{"r"}, + cmdFn: commands.Add, +}) diff --git a/cmd/ipfs/bootstrap.go b/cmd/ipfs/bootstrap.go index 4c0ea9fca13..68a1ad63361 100644 --- a/cmd/ipfs/bootstrap.go +++ 
b/cmd/ipfs/bootstrap.go @@ -222,10 +222,7 @@ func bootstrapInputToPeers(input []string) ([]*config.BootstrapPeer, error) { return nil, err } - addrS, err = maddr.String() - if err != nil { - return nil, err - } + addrS = maddr.String() } // make sure idS parses as a peer.ID diff --git a/cmd/ipfs/cat.go b/cmd/ipfs/cat.go index d7ab500288a..168a4841e77 100644 --- a/cmd/ipfs/cat.go +++ b/cmd/ipfs/cat.go @@ -18,33 +18,9 @@ var cmdIpfsCat = &commander.Command{ Flag: *flag.NewFlagSet("ipfs-cat", flag.ExitOnError), } -var catCmd = MakeCommand("cat", nil, commands.Cat) - -/* -func catCmd(c *commander.Command, inp []string) error { - if len(inp) < 1 { - u.POut(c.Long) - return nil - } - - conf, err := getConfigDir(c.Parent) - if err != nil { - return err - } - - com := daemon.NewCommand() - com.Command = "cat" - com.Args = inp - - err = daemon.SendCommand(com, conf) - if err != nil { - n, err := localNode(conf, false) - if err != nil { - return err - } - - return commands.Cat(n, com.Args, com.Opts, os.Stdout) - } - return nil -} -*/ +var catCmd = makeCommand(command{ + name: "cat", + args: 1, + flags: nil, + cmdFn: commands.Cat, +}) diff --git a/cmd/ipfs/gen.go b/cmd/ipfs/gen.go index 5c7d1aa40e6..299171550da 100644 --- a/cmd/ipfs/gen.go +++ b/cmd/ipfs/gen.go @@ -1,62 +1,60 @@ package main import ( - "errors" "fmt" "os" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" - "github.com/jbenet/go-ipfs/config" "github.com/jbenet/go-ipfs/core/commands" "github.com/jbenet/go-ipfs/daemon" u "github.com/jbenet/go-ipfs/util" ) -// CommanderFunc is a function that can be passed into the Commander library as +// command is the descriptor of an ipfs daemon command. +// Used with makeCommand to proxy over commands via the daemon. +type command struct { + name string + args int + flags []string + online bool + cmdFn commands.CmdFunc +} + +// commanderFunc is a function that can be passed into the Commander library as // a command handler. 
Defined here because commander lacks this definition. -type CommanderFunc func(*commander.Command, []string) error +type commanderFunc func(*commander.Command, []string) error -// MakeCommand Wraps a commands.CmdFunc so that it may be safely run by the +// makeCommand Wraps a commands.CmdFunc so that it may be safely run by the // commander library -func MakeCommand(cmdName string, expargs []string, cmdFn commands.CmdFunc) CommanderFunc { +func makeCommand(cmdDesc command) commanderFunc { return func(c *commander.Command, inp []string) error { - if len(inp) < 1 { + if len(inp) < cmdDesc.args { u.POut(c.Long) return nil } - confdir, err := getConfigDir(c.Parent) + confdir, err := getConfigDir(c) if err != nil { return err } - confapi, err := config.ReadConfigKey(confdir+"/config", "Addresses.API") - if err != nil { - return err - } - - apiaddr, ok := confapi.(string) - if !ok { - return errors.New("ApiAddress in config file was not a string") - } - cmd := daemon.NewCommand() - cmd.Command = cmdName + cmd.Command = cmdDesc.name cmd.Args = inp - for _, a := range expargs { + for _, a := range cmdDesc.flags { cmd.Opts[a] = c.Flag.Lookup(a).Value.Get() } - err = daemon.SendCommand(cmd, apiaddr) + + err = daemon.SendCommand(cmd, confdir) if err != nil { - fmt.Printf("Executing command locally: %s", err) + log.Info("Executing command locally: %s", err) // Do locally - n, err := localNode(confdir, false) + n, err := localNode(confdir, cmdDesc.online) if err != nil { - fmt.Println("Local node creation failed.") - return err + return fmt.Errorf("Local node creation failed: %v", err) } - return cmdFn(n, cmd.Args, cmd.Opts, os.Stdout) + return cmdDesc.cmdFn(n, cmd.Args, cmd.Opts, os.Stdout) } return nil } diff --git a/cmd/ipfs/init.go b/cmd/ipfs/init.go index b868f2b91dc..d70b6f3f9f9 100644 --- a/cmd/ipfs/init.go +++ b/cmd/ipfs/init.go @@ -92,6 +92,12 @@ func initCmd(c *commander.Command, inp []string) error { API: "/ip4/127.0.0.1/tcp/5001", } + // setup the node mount points. 
+ cfg.Mounts = config.Mounts{ + IPFS: "/ipfs", + IPNS: "/ipns", + } + nbits, ok := c.Flag.Lookup("b").Value.Get().(int) if !ok { return errors.New("failed to get bits flag") diff --git a/cmd/ipfs/ipfs.go b/cmd/ipfs/ipfs.go index bb4c3673c04..e3d3629135d 100644 --- a/cmd/ipfs/ipfs.go +++ b/cmd/ipfs/ipfs.go @@ -1,14 +1,18 @@ package main import ( + "errors" "fmt" "os" "runtime/pprof" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" - "github.com/jbenet/go-ipfs/config" + flag "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + commander "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + + config "github.com/jbenet/go-ipfs/config" core "github.com/jbenet/go-ipfs/core" + daemon "github.com/jbenet/go-ipfs/daemon" u "github.com/jbenet/go-ipfs/util" ) @@ -50,11 +54,16 @@ Use "ipfs help " for more information about a command. cmdIpfsMount, cmdIpfsInit, cmdIpfsServe, + cmdIpfsRun, + cmdIpfsName, cmdIpfsBootstrap, }, Flag: *flag.NewFlagSet("ipfs", flag.ExitOnError), } +// log is the command logger +var log = u.Logger("cmd/ipfs") + func init() { config, err := config.PathRoot() if err != nil { @@ -69,16 +78,24 @@ func ipfsCmd(c *commander.Command, args []string) error { } func main() { - u.Debug = true - ofi, err := os.Create("cpu.prof") - if err != nil { - fmt.Println(err) - return + u.Debug = false + + // setup logging + // u.SetupLogging() done in an init() block now. + + // if debugging, setup profiling. 
+ if u.Debug { + ofi, err := os.Create("cpu.prof") + if err != nil { + fmt.Println(err) + return + } + pprof.StartCPUProfile(ofi) + defer ofi.Close() + defer pprof.StopCPUProfile() } - pprof.StartCPUProfile(ofi) - defer ofi.Close() - defer pprof.StopCPUProfile() - err = CmdIpfs.Dispatch(os.Args[1:]) + + err := CmdIpfs.Dispatch(os.Args[1:]) if err != nil { if len(err.Error()) > 0 { fmt.Fprintf(os.Stderr, "ipfs %s: %v\n", os.Args[1], err) @@ -88,6 +105,7 @@ func main() { return } +// localNode constructs a node func localNode(confdir string, online bool) (*core.IpfsNode, error) { filename, err := config.Filename(confdir) if err != nil { @@ -132,3 +150,54 @@ func getConfig(c *commander.Command) (*config.Config, error) { return config.Load(filename) } + +// cmdContext is a wrapper structure that keeps a node, a daemonlistener, and +// a config directory together. These three are needed for most commands. +type cmdContext struct { + node *core.IpfsNode + daemon *daemon.DaemonListener + configDir string +} + +// setupCmdContext initializes a cmdContext structure from a given command. +func setupCmdContext(c *commander.Command, online bool) (cc cmdContext, err error) { + rootCmd := c + for ; rootCmd.Parent != nil; rootCmd = c.Parent { + } + + cc.configDir, err = getConfigDir(rootCmd) + if err != nil { + return + } + + cc.node, err = localNode(cc.configDir, online) + if err != nil { + return + } + + cc.daemon, err = setupDaemon(cc.configDir, cc.node) + if err != nil { + return + } + + return +} + +// setupDaemon sets up the daemon corresponding to given node. 
+func setupDaemon(confdir string, node *core.IpfsNode) (*daemon.DaemonListener, error) { + if node.Config.Addresses.API == "" { + return nil, errors.New("no config.Addresses.API endpoint supplied") + } + + maddr, err := ma.NewMultiaddr(node.Config.Addresses.API) + if err != nil { + return nil, err + } + + dl, err := daemon.NewDaemonListener(node, maddr, confdir) + if err != nil { + return nil, err + } + go dl.Listen() + return dl, nil +} diff --git a/cmd/ipfs/ls.go b/cmd/ipfs/ls.go index aed286deafc..0a2e8aff9cd 100644 --- a/cmd/ipfs/ls.go +++ b/cmd/ipfs/ls.go @@ -21,4 +21,9 @@ var cmdIpfsLs = &commander.Command{ Flag: *flag.NewFlagSet("ipfs-ls", flag.ExitOnError), } -var lsCmd = MakeCommand("ls", nil, commands.Ls) +var lsCmd = makeCommand(command{ + name: "ls", + args: 1, + flags: nil, + cmdFn: commands.Ls, +}) diff --git a/cmd/ipfs/mount_unix.go b/cmd/ipfs/mount_unix.go index 58523230a56..84fbc9cf61a 100644 --- a/cmd/ipfs/mount_unix.go +++ b/cmd/ipfs/mount_unix.go @@ -3,16 +3,14 @@ package main import ( - "errors" "fmt" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - "github.com/jbenet/go-ipfs/daemon" + core "github.com/jbenet/go-ipfs/core" + ipns "github.com/jbenet/go-ipfs/fuse/ipns" rofs "github.com/jbenet/go-ipfs/fuse/readonly" - u "github.com/jbenet/go-ipfs/util" ) var cmdIpfsMount = &commander.Command{ @@ -29,42 +27,68 @@ var cmdIpfsMount = &commander.Command{ Flag: *flag.NewFlagSet("ipfs-mount", flag.ExitOnError), } +func init() { + cmdIpfsMount.Flag.String("f", "", "specify a mountpoint for ipfs") + cmdIpfsMount.Flag.String("n", "", "specify a mountpoint for ipns") +} + func mountCmd(c *commander.Command, inp []string) error { - if len(inp) < 1 || len(inp[0]) == 0 { - u.POut(c.Long) - return nil - } - conf, err := getConfigDir(c.Parent) + cc, err := setupCmdContext(c, 
true) if err != nil { - fmt.Println("Couldnt get config dir") return err } - n, err := localNode(conf, true) - if err != nil { - fmt.Println("Local node creation failed.") - return err + defer cc.daemon.Close() + + // update fsdir with flag. + fsdir := cc.node.Config.Mounts.IPFS + if val, ok := c.Flag.Lookup("f").Value.Get().(string); ok && val != "" { + fsdir = val } + fsdone := mountIpfs(cc.node, fsdir) - // launch the API RPC endpoint. - if n.Config.Addresses.API == "" { - return errors.New("no config.RPCAddress endpoint supplied") + // get default mount points + nsdir := cc.node.Config.Mounts.IPNS + if val, ok := c.Flag.Lookup("n").Value.Get().(string); ok && val != "" { + nsdir = val } + nsdone := mountIpns(cc.node, nsdir, fsdir) - maddr, err := ma.NewMultiaddr(n.Config.Addresses.API) - if err != nil { - return err + // wait till mounts are done. + err1 := <-fsdone + err2 := <-nsdone + + if err1 != nil { + return err1 } + return err2 +} - dl, err := daemon.NewDaemonListener(n, maddr, conf) - if err != nil { - fmt.Println("Failed to create daemon listener.") - return err +func mountIpfs(node *core.IpfsNode, fsdir string) <-chan error { + done := make(chan error) + fmt.Printf("mounting ipfs at %s\n", fsdir) + + go func() { + err := rofs.Mount(node, fsdir) + done <- err + close(done) + }() + + return done +} + +func mountIpns(node *core.IpfsNode, nsdir, fsdir string) <-chan error { + if nsdir == "" { + return nil } - go dl.Listen() - defer dl.Close() + done := make(chan error) + fmt.Printf("mounting ipns at %s\n", nsdir) + + go func() { + err := ipns.Mount(node, nsdir, fsdir) + done <- err + close(done) + }() - mp := inp[0] - fmt.Printf("Mounting at %s\n", mp) - return rofs.Mount(n, mp) + return done } diff --git a/cmd/ipfs/name.go b/cmd/ipfs/name.go new file mode 100644 index 00000000000..a2ef78e5067 --- /dev/null +++ b/cmd/ipfs/name.go @@ -0,0 +1,57 @@ +package main + +import ( + "fmt" + + flag 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + commander "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +var cmdIpfsName = &commander.Command{ + UsageLine: "name [publish | resolve]", + Short: "ipfs namespace (ipns) tool", + Long: `ipfs name - Get/Set ipfs config values. + + ipfs name publish [] - Assign the to + ipfs name resolve [] - Resolve the value of + +IPNS is a PKI namespace, where names are the hashes of public keys, and +the private key enables publishing new (signed) values. In both publish +and resolve, the default value of is your own identity public key. + + +Examples: + +Publish a to your identity name: + + > ipfs name publish QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +Publish a to another public key: + + > ipfs name publish QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +Resolve the value of your identity: + + > ipfs name resolve + QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +Resolve te value of another name: + + > ipfs name resolve QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n + QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +`, + Run: addCmd, + Flag: *flag.NewFlagSet("ipfs-name", flag.ExitOnError), + Subcommands: []*commander.Command{ + cmdIpfsPub, + cmdIpfsResolve, + }, +} + +func nameCmd(c *commander.Command, args []string) error { + fmt.Println(c.Long) + return nil +} diff --git a/cmd/ipfs/pin.go b/cmd/ipfs/pin.go new file mode 100644 index 00000000000..0077597598b --- /dev/null +++ b/cmd/ipfs/pin.go @@ -0,0 +1,31 @@ +package main + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" 
+ "github.com/jbenet/go-ipfs/core/commands" +) + +var cmdIpfsPin = &commander.Command{ + UsageLine: "pin", + Short: "pin an ipfs object to local storage.", + Long: `ipfs pin - pin ipfs object to local storage. + + Retrieves the object named by and stores it locally + on disk. +`, + Run: pinCmd, + Flag: *flag.NewFlagSet("ipfs-pin", flag.ExitOnError), +} + +func init() { + cmdIpfsPin.Flag.Bool("r", false, "pin objects recursively") + cmdIpfsPin.Flag.Int("d", 1, "recursive depth") +} + +var pinCmd = makeCommand(command{ + name: "pin", + args: 1, + flags: []string{"r", "d"}, + cmdFn: commands.Pin, +}) diff --git a/cmd/ipfs/publish.go b/cmd/ipfs/publish.go new file mode 100644 index 00000000000..041da0028b2 --- /dev/null +++ b/cmd/ipfs/publish.go @@ -0,0 +1,41 @@ +package main + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/core/commands" +) + +var cmdIpfsPub = &commander.Command{ + UsageLine: "publish", + Short: "publish a to ipns.", + Long: `ipfs publish [] - publish a to ipns. + +IPNS is a PKI namespace, where names are the hashes of public keys, and +the private key enables publishing new (signed) values. In publish, the +default value of is your own identity public key. 
+ +Examples: + +Publish a to your identity name: + + > ipfs name publish QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +Publish a to another public key: + + > ipfs name publish QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + published name QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n to QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +`, + Run: pubCmd, + Flag: *flag.NewFlagSet("ipfs-publish", flag.ExitOnError), +} + +var pubCmd = makeCommand(command{ + name: "publish", + args: 1, + flags: nil, + online: true, + cmdFn: commands.Publish, +}) diff --git a/cmd/ipfs/refs.go b/cmd/ipfs/refs.go index f86e51dbf94..b3aaf85fc33 100644 --- a/cmd/ipfs/refs.go +++ b/cmd/ipfs/refs.go @@ -28,4 +28,9 @@ func init() { cmdIpfsRefs.Flag.Bool("u", false, "unique: list each ref only once") } -var refCmd = MakeCommand("refs", []string{"r", "u"}, commands.Refs) +var refCmd = makeCommand(command{ + name: "refs", + args: 1, + flags: []string{"r", "u"}, + cmdFn: commands.Refs, +}) diff --git a/cmd/ipfs/resolve.go b/cmd/ipfs/resolve.go new file mode 100644 index 00000000000..9f5107ff867 --- /dev/null +++ b/cmd/ipfs/resolve.go @@ -0,0 +1,42 @@ +package main + +import ( + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" + "github.com/jbenet/go-ipfs/core/commands" +) + +var cmdIpfsResolve = &commander.Command{ + UsageLine: "resolve", + Short: "resolve an ipns name to a ", + Long: `ipfs resolve [] - Resolve an ipns name to a . + +IPNS is a PKI namespace, where names are the hashes of public keys, and +the private key enables publishing new (signed) values. In resolve, the +default value of is your own identity public key. 
+ + +Examples: + +Resolve the value of your identity: + + > ipfs name resolve + QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +Resolve the value of another name: + + > ipfs name resolve QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n + QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + +`, + Run: resolveCmd, + Flag: *flag.NewFlagSet("ipfs-resolve", flag.ExitOnError), +} + +var resolveCmd = makeCommand(command{ + name: "resolve", + args: 0, + flags: nil, + online: true, + cmdFn: commands.Resolve, +}) diff --git a/cmd/ipfs/run.go b/cmd/ipfs/run.go new file mode 100644 index 00000000000..b651ca9d9dd --- /dev/null +++ b/cmd/ipfs/run.go @@ -0,0 +1,36 @@ +package main + +import ( + "os" + "os/signal" + "syscall" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" +) + +var cmdIpfsRun = &commander.Command{ + UsageLine: "run", + Short: "run local ipfs node.", + Long: `run a local ipfs node with no other interface. +`, + Run: runCmd, + Flag: *flag.NewFlagSet("ipfs-run", flag.ExitOnError), +} + +func runCmd(c *commander.Command, inp []string) error { + cc, err := setupCmdContext(c, true) + if err != nil { + return err + } + + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT, + syscall.SIGTERM, syscall.SIGQUIT) + + // wait until we get a signal to exit. 
+ <-sigc + + cc.daemon.Close() + return nil +} diff --git a/cmd/ipfs/serve.go b/cmd/ipfs/serve.go index b4fb48c1db8..fda3b3f22d8 100644 --- a/cmd/ipfs/serve.go +++ b/cmd/ipfs/serve.go @@ -1,14 +1,12 @@ package main import ( - "errors" "fmt" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/gonuts/flag" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - "github.com/jbenet/go-ipfs/daemon" h "github.com/jbenet/go-ipfs/server/http" ) @@ -16,58 +14,36 @@ var cmdIpfsServe = &commander.Command{ UsageLine: "serve", Short: "Serve an interface to ipfs", Subcommands: []*commander.Command{ - cmdIpfsServeHttp, + cmdIpfsServeHTTP, }, Flag: *flag.NewFlagSet("ipfs-serve", flag.ExitOnError), } -var cmdIpfsServeHttp = &commander.Command{ +var cmdIpfsServeHTTP = &commander.Command{ UsageLine: "http", Short: "Serve an HTTP API", Long: `ipfs serve http - Serve an http gateway into ipfs.`, - Run: serveHttpCmd, + Run: serveHTTPCmd, Flag: *flag.NewFlagSet("ipfs-serve-http", flag.ExitOnError), } func init() { - cmdIpfsServeHttp.Flag.String("address", "/ip4/127.0.0.1/tcp/8080", "Listen Address") + cmdIpfsServeHTTP.Flag.String("address", "/ip4/127.0.0.1/tcp/8080", "Listen Address") } -func serveHttpCmd(c *commander.Command, _ []string) error { - conf, err := getConfigDir(c.Parent.Parent) +func serveHTTPCmd(c *commander.Command, _ []string) error { + cc, err := setupCmdContext(c, true) if err != nil { return err } - - n, err := localNode(conf, true) - if err != nil { - return err - } - - // launch the API RPC endpoint. 
- if n.Config.Addresses.API == "" { - return errors.New("no config.RPCAddress endpoint supplied") - } - - maddr, err := ma.NewMultiaddr(n.Config.Addresses.API) - if err != nil { - return err - } - - dl, err := daemon.NewDaemonListener(n, maddr, conf) - if err != nil { - fmt.Println("Failed to create daemon listener.") - return err - } - go dl.Listen() - defer dl.Close() + defer cc.daemon.Close() address := c.Flag.Lookup("address").Value.Get().(string) - maddr, err = ma.NewMultiaddr(address) + maddr, err := ma.NewMultiaddr(address) if err != nil { return err } fmt.Printf("Serving on %s\n", address) - return h.Serve(maddr, n) + return h.Serve(maddr, cc.node) } diff --git a/config/config.go b/config/config.go index 4877842929b..5acc23c4d84 100644 --- a/config/config.go +++ b/config/config.go @@ -29,6 +29,12 @@ type Addresses struct { API string // address for the local API (RPC) } +// Mounts stores the (string) mount points +type Mounts struct { + IPFS string + IPNS string +} + // BootstrapPeer is a peer used to bootstrap the network. type BootstrapPeer struct { Address string @@ -44,12 +50,20 @@ type Config struct { Identity Identity // local node's peer identity Datastore Datastore // local node's storage Addresses Addresses // local node's addresses + Mounts Mounts // local node's mount points Bootstrap []*BootstrapPeer // local nodes's bootstrap peers } +// DefaultPathRoot is the path to the default config dir location. const DefaultPathRoot = "~/.go-ipfs" + +// DefaultConfigFile is the filename of the configuration file const DefaultConfigFile = "config" + +// DefaultDataStoreDirectory is the directory to store all the local IPFS data. const DefaultDataStoreDirectory = "datastore" + +// EnvDir is the environment variable used to change the path root. 
const EnvDir = "IPFS_DIR" // PathRoot returns the default configuration root directory @@ -69,13 +83,11 @@ func Path(configroot, extension string) (string, error) { dir, err := PathRoot() if err != nil { return "", err - } else { - return filepath.Join(dir, extension), nil } + return filepath.Join(dir, extension), nil - } else { - return filepath.Join(configroot, extension), nil } + return filepath.Join(configroot, extension), nil } // DataStorePath returns the default data store path given a configuration root diff --git a/core/commands/add.go b/core/commands/add.go index 48a6ccae9f8..e51c4b7982c 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -11,6 +11,7 @@ import ( "github.com/jbenet/go-ipfs/core" "github.com/jbenet/go-ipfs/importer" dag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" u "github.com/jbenet/go-ipfs/util" ) @@ -36,7 +37,7 @@ func Add(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Wr } // Add the file - nd, err := AddPath(n, path, depth) + _, err = AddPath(n, path, depth) if err != nil { if err == ErrDepthLimitExceeded && depth == 1 { err = errors.New("use -r to recursively add directories") @@ -45,12 +46,13 @@ func Add(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Wr } // get the key to print it - k, err := nd.Key() - if err != nil { - return fmt.Errorf("addFile error: %v", err) - } - - fmt.Fprintf(out, "added %s %s\n", k.Pretty(), path) + // k, err := nd.Key() + // if err != nil { + // return fmt.Errorf("addFile error: %v", err) + // } + // + // Commenting out of here, because it's already in addNode below. 
+ // fmt.Fprintf(out, "added %s %s\n", k, path) } return nil } @@ -74,7 +76,7 @@ func AddPath(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) { } func addDir(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) { - tree := &dag.Node{Data: dag.FolderPBData()} + tree := &dag.Node{Data: ft.FolderPBData()} files, err := ioutil.ReadDir(fpath) if err != nil { @@ -103,6 +105,16 @@ func addFile(n *core.IpfsNode, fpath string, depth int) (*dag.Node, error) { return nil, err } + k, err := root.Key() + if err != nil { + return nil, err + } + + log.Info("Adding file: %s = %s\n", fpath, k) + for _, l := range root.Links { + log.Info("SubBlock: %s\n", l.Hash.B58String()) + } + return root, addNode(n, root, fpath) } @@ -119,8 +131,8 @@ func addNode(n *core.IpfsNode, nd *dag.Node, fpath string) error { return err } - u.POut("added %s %s\n", k.Pretty(), fpath) + u.POut("added %s %s\n", k, fpath) // ensure we keep it. atm no-op - return n.PinDagNode(nd) + return n.PinDagNodeRecursively(nd, -1) } diff --git a/core/commands/cat.go b/core/commands/cat.go index 5cb3f9651c4..7ebae801c9b 100644 --- a/core/commands/cat.go +++ b/core/commands/cat.go @@ -5,7 +5,7 @@ import ( "io" "github.com/jbenet/go-ipfs/core" - mdag "github.com/jbenet/go-ipfs/merkledag" + uio "github.com/jbenet/go-ipfs/unixfs/io" ) func Cat(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error { @@ -15,7 +15,7 @@ func Cat(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Wr return fmt.Errorf("catFile error: %v", err) } - read, err := mdag.NewDagReader(dagnode, n.DAG) + read, err := uio.NewDagReader(dagnode, n.DAG) if err != nil { return fmt.Errorf("cat error: %v", err) } diff --git a/core/commands/commands.go b/core/commands/commands.go index 5bef54a7b18..0c2541146b8 100644 --- a/core/commands/commands.go +++ b/core/commands/commands.go @@ -4,6 +4,9 @@ import ( "io" "github.com/jbenet/go-ipfs/core" + u "github.com/jbenet/go-ipfs/util" ) +var log = 
u.Logger("commands") + type CmdFunc func(*core.IpfsNode, []string, map[string]interface{}, io.Writer) error diff --git a/core/commands/pin.go b/core/commands/pin.go index 8594ee9489c..40b452ffae8 100644 --- a/core/commands/pin.go +++ b/core/commands/pin.go @@ -8,13 +8,28 @@ import ( ) func Pin(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error { + + // set recursive flag + recursive, _ := opts["r"].(bool) // false if cast fails. + + // if recursive, set depth flag + depth := 1 // default (non recursive) + if d, ok := opts["d"].(int); recursive && ok { + depth = d + } + if depth < -1 { + return fmt.Errorf("ipfs pin: called with invalid depth: %v", depth) + } + + fmt.Printf("recursive, depth: %v, %v\n", recursive, depth) + for _, fn := range args { dagnode, err := n.Resolver.ResolvePath(fn) if err != nil { return fmt.Errorf("pin error: %v", err) } - err = n.PinDagNode(dagnode) + err = n.PinDagNodeRecursively(dagnode, depth) if err != nil { return fmt.Errorf("pin: %v", err) } diff --git a/core/commands/publish.go b/core/commands/publish.go new file mode 100644 index 00000000000..3d58ab43127 --- /dev/null +++ b/core/commands/publish.go @@ -0,0 +1,53 @@ +package commands + +import ( + "errors" + "fmt" + "io" + + "github.com/jbenet/go-ipfs/core" + u "github.com/jbenet/go-ipfs/util" + + nsys "github.com/jbenet/go-ipfs/namesys" +) + +func Publish(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error { + log.Debug("Begin Publish") + + if n.Identity == nil { + return errors.New("Identity not loaded!") + } + + // name := "" + ref := "" + + switch len(args) { + case 2: + // name = args[0] + ref = args[1] + return errors.New("keychains not yet implemented") + case 1: + // name = n.Identity.ID.String() + ref = args[0] + + default: + return fmt.Errorf("Publish expects 1 or 2 args; got %d.", len(args)) + } + + // later, n.Keychain.Get(name).PrivKey + k := n.Identity.PrivKey + + pub := nsys.NewRoutingPublisher(n.Routing) 
+ err := pub.Publish(k, ref) + if err != nil { + return err + } + + hash, err := k.GetPublic().Hash() + if err != nil { + return err + } + fmt.Fprintf(out, "published name %s to %s\n", u.Key(hash), ref) + + return nil +} diff --git a/core/commands/resolve.go b/core/commands/resolve.go new file mode 100644 index 00000000000..7307dc2654d --- /dev/null +++ b/core/commands/resolve.go @@ -0,0 +1,35 @@ +package commands + +import ( + "errors" + "fmt" + "io" + + "github.com/jbenet/go-ipfs/core" +) + +func Resolve(n *core.IpfsNode, args []string, opts map[string]interface{}, out io.Writer) error { + + name := "" + + switch len(args) { + case 1: + name = args[0] + case 0: + if n.Identity == nil { + return errors.New("Identity not loaded!") + } + name = n.Identity.ID.String() + + default: + return fmt.Errorf("Resolve expects 0 or 1 args; got %d.", len(args)) + } + + res, err := n.Namesys.Resolve(name) + if err != nil { + return err + } + + fmt.Fprintf(out, "%s\n", res) + return nil +} diff --git a/core/core.go b/core/core.go index f641e8b2d93..4e764649b16 100644 --- a/core/core.go +++ b/core/core.go @@ -16,6 +16,7 @@ import ( exchange "github.com/jbenet/go-ipfs/exchange" bitswap "github.com/jbenet/go-ipfs/exchange/bitswap" merkledag "github.com/jbenet/go-ipfs/merkledag" + namesys "github.com/jbenet/go-ipfs/namesys" inet "github.com/jbenet/go-ipfs/net" mux "github.com/jbenet/go-ipfs/net/mux" netservice "github.com/jbenet/go-ipfs/net/service" @@ -26,6 +27,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("core") + +// IpfsNode is IPFS Core module. It represents an IPFS instance. 
type IpfsNode struct { @@ -39,7 +42,7 @@ type IpfsNode struct { Peerstore peer.Peerstore // the local datastore - Datastore ds.Datastore + Datastore ds.ThreadSafeDatastore // the network message stream Network inet.Network @@ -60,7 +63,7 @@ type IpfsNode struct { Resolver *path.Resolver // the name system, resolves paths to hashes - // Namesys *namesys.Namesys + Namesys namesys.NameSystem } // NewIpfsNode constructs a new IpfsNode based on the given config. @@ -142,6 +145,7 @@ func NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) { } dag := &merkledag.DAGService{Blocks: bs} + ns := namesys.NewNameSystem(route) success = true return &IpfsNode{ @@ -154,6 +158,7 @@ func NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) { Exchange: exchangeSession, Identity: local, Routing: route, + Namesys: ns, }, nil } @@ -167,14 +172,14 @@ func initIdentity(cfg *config.Config, online bool) (*peer.Peer, error) { } // address is optional - var addresses []*ma.Multiaddr + var addresses []ma.Multiaddr if len(cfg.Addresses.Swarm) > 0 { maddr, err := ma.NewMultiaddr(cfg.Addresses.Swarm) if err != nil { return nil, err } - addresses = []*ma.Multiaddr{maddr} + addresses = []ma.Multiaddr{maddr} } var ( @@ -232,8 +237,13 @@ func initConnections(ctx context.Context, cfg *config.Config, pstore peer.Peerst } } -// PinDagNode ensures a given node is stored persistently locally. +// PinDagNode ensures a given node is stored persistently locally func (n *IpfsNode) PinDagNode(nd *merkledag.Node) error { - u.DOut("Pinning node. Currently No-Op\n") + return n.PinDagNodeRecursively(nd, 1) +} + +// PinDagNodeRecursively ensures a given node is stored persistently locally +func (n *IpfsNode) PinDagNodeRecursively(nd *merkledag.Node, depth int) error { + u.DOut("Pinning node recursively. 
Currently No-Op\n") return nil } diff --git a/core/datastore.go b/core/datastore.go index 9105adaabeb..da73285f1ea 100644 --- a/core/datastore.go +++ b/core/datastore.go @@ -4,11 +4,16 @@ import ( "fmt" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + fsds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/fs" + ktds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/keytransform" lds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/leveldb" + syncds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/sync" + config "github.com/jbenet/go-ipfs/config" + u "github.com/jbenet/go-ipfs/util" ) -func makeDatastore(cfg config.Datastore) (ds.Datastore, error) { +func makeDatastore(cfg config.Datastore) (ds.ThreadSafeDatastore, error) { if len(cfg.Type) == 0 { return nil, fmt.Errorf("config datastore.type required") } @@ -16,14 +21,24 @@ func makeDatastore(cfg config.Datastore) (ds.Datastore, error) { switch cfg.Type { case "leveldb": return makeLevelDBDatastore(cfg) + case "memory": - return ds.NewMapDatastore(), nil + return syncds.MutexWrap(ds.NewMapDatastore()), nil + + case "fs": + log.Warning("using fs.Datastore at .datastore for testing.") + d, err := fsds.NewDatastore(".datastore") // for testing!! 
+ if err != nil { + return nil, err + } + ktd := ktds.WrapDatastore(d, u.DsKeyB58Encode) + return syncds.MutexWrap(ktd), nil } return nil, fmt.Errorf("Unknown datastore type: %s", cfg.Type) } -func makeLevelDBDatastore(cfg config.Datastore) (ds.Datastore, error) { +func makeLevelDBDatastore(cfg config.Datastore) (ds.ThreadSafeDatastore, error) { if len(cfg.Path) == 0 { return nil, fmt.Errorf("config datastore.path required for leveldb") } diff --git a/core/mock.go b/core/mock.go new file mode 100644 index 00000000000..9eececa3dac --- /dev/null +++ b/core/mock.go @@ -0,0 +1,52 @@ +package core + +import ( + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + syncds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go/sync" + bs "github.com/jbenet/go-ipfs/blockservice" + ci "github.com/jbenet/go-ipfs/crypto" + mdag "github.com/jbenet/go-ipfs/merkledag" + nsys "github.com/jbenet/go-ipfs/namesys" + path "github.com/jbenet/go-ipfs/path" + "github.com/jbenet/go-ipfs/peer" + mdht "github.com/jbenet/go-ipfs/routing/mock" +) + +func NewMockNode() (*IpfsNode, error) { + nd := new(IpfsNode) + + //Generate Identity + nd.Identity = &peer.Peer{ID: []byte("TESTING")} + pk, sk, err := ci.GenerateKeyPair(ci.RSA, 1024) + if err != nil { + return nil, err + } + nd.Identity.PrivKey = pk + nd.Identity.PubKey = sk + + // Temp Datastore + dstore := ds.NewMapDatastore() + nd.Datastore = syncds.MutexWrap(dstore) + + // Routing + dht := mdht.NewMockRouter(nd.Identity, nd.Datastore) + nd.Routing = dht + + // Bitswap + //?? 
+ + bserv, err := bs.NewBlockService(nd.Datastore, nil) + if err != nil { + return nil, err + } + + nd.DAG = &mdag.DAGService{bserv} + + // Namespace resolver + nd.Namesys = nsys.NewNameSystem(dht) + + // Path resolver + nd.Resolver = &path.Resolver{nd.DAG} + + return nd, nil +} diff --git a/crypto/key.go b/crypto/key.go index f0a35c698ab..0ed39c741a1 100644 --- a/crypto/key.go +++ b/crypto/key.go @@ -3,6 +3,7 @@ package crypto import ( "bytes" "errors" + "fmt" "crypto/elliptic" "crypto/hmac" @@ -12,9 +13,10 @@ import ( "crypto/sha256" "crypto/sha512" "hash" - "math/big" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + + u "github.com/jbenet/go-ipfs/util" ) var ErrBadKeyType = errors.New("invalid or unsupported key type") @@ -27,6 +29,9 @@ type Key interface { // Bytes returns a serialized, storeable representation of this key Bytes() ([]byte, error) + // Hash returns the hash of this key + Hash() ([]byte, error) + // Equals checks whether two PubKeys are the same Equals(Key) bool } @@ -91,25 +96,16 @@ func GenerateEKeyPair(curveName string) ([]byte, GenSharedKey, error) { return nil, nil, err } - var pubKey bytes.Buffer - pubKey.Write(x.Bytes()) - pubKey.Write(y.Bytes()) + pubKey := elliptic.Marshal(curve, x, y) + u.PErr("GenerateEKeyPair %d\n", len(pubKey)) done := func(theirPub []byte) ([]byte, error) { // Verify and unpack node's public key. 
- curveSize := curve.Params().BitSize - - if len(theirPub) != (curveSize / 4) { - return nil, errors.New("Malformed public key.") + x, y := elliptic.Unmarshal(curve, theirPub) + if x == nil { + return nil, fmt.Errorf("Malformed public key: %d %v", len(theirPub), theirPub) } - bound := (curveSize / 8) - x := big.NewInt(0) - y := big.NewInt(0) - - x.SetBytes(theirPub[0:bound]) - y.SetBytes(theirPub[bound : bound*2]) - if !curve.IsOnCurve(x, y) { return nil, errors.New("Invalid public key.") } @@ -120,7 +116,7 @@ func GenerateEKeyPair(curveName string) ([]byte, GenSharedKey, error) { return secret.Bytes(), nil } - return pubKey.Bytes(), done, nil + return pubKey, done, nil } // Generates a set of keys for each party by stretching the shared key. @@ -246,3 +242,12 @@ func KeyEqual(k1, k2 Key) bool { b2, err2 := k2.Bytes() return bytes.Equal(b1, b2) && err1 == err2 } + +// KeyHash hashes a key. +func KeyHash(k Key) ([]byte, error) { + kb, err := k.Bytes() + if err != nil { + return nil, err + } + return u.Hash(kb), nil +} diff --git a/crypto/key_test.go b/crypto/key_test.go index 13c94215e80..112b99bee57 100644 --- a/crypto/key_test.go +++ b/crypto/key_test.go @@ -92,3 +92,7 @@ func (pk testkey) Bytes() ([]byte, error) { func (pk testkey) Equals(k Key) bool { return KeyEqual(pk, k) } + +func (pk testkey) Hash() ([]byte, error) { + return KeyHash(pk) +} diff --git a/crypto/rsa.go b/crypto/rsa.go index e582b59c297..bad04bfb53d 100644 --- a/crypto/rsa.go +++ b/crypto/rsa.go @@ -46,6 +46,10 @@ func (pk *RsaPublicKey) Equals(k Key) bool { return KeyEqual(pk, k) } +func (pk *RsaPublicKey) Hash() ([]byte, error) { + return KeyHash(pk) +} + func (sk *RsaPrivateKey) GenSecret() []byte { buf := make([]byte, 16) rand.Read(buf) @@ -75,6 +79,10 @@ func (sk *RsaPrivateKey) Equals(k Key) bool { return KeyEqual(sk, k) } +func (sk *RsaPrivateKey) Hash() ([]byte, error) { + return KeyHash(sk) +} + func UnmarshalRsaPrivateKey(b []byte) (*RsaPrivateKey, error) { sk, err := 
x509.ParsePKCS1PrivateKey(b) if err != nil { diff --git a/crypto/spipe/handshake.go b/crypto/spipe/handshake.go index f617c75b3aa..2d6f2ea9f19 100644 --- a/crypto/spipe/handshake.go +++ b/crypto/spipe/handshake.go @@ -49,7 +49,7 @@ func (s *SecurePipe) handshake() error { return err } - // u.DOut("handshake: %s <--> %s\n", s.local.ID.Pretty(), s.remote.ID.Pretty()) + // u.DOut("handshake: %s <--> %s\n", s.local, s.remote) myPubKey, err := s.local.PubKey.Bytes() if err != nil { return err @@ -101,7 +101,7 @@ func (s *SecurePipe) handshake() error { if err != nil { return err } - u.DOut("[%s] Remote Peer Identified as %s\n", s.local.ID.Pretty(), s.remote.ID.Pretty()) + u.DOut("%s Remote Peer Identified as %s\n", s.local, s.remote) exchange, err := selectBest(SupportedExchanges, proposeResp.GetExchanges()) if err != nil { @@ -119,7 +119,7 @@ func (s *SecurePipe) handshake() error { } // u.POut("Selected %s %s %s\n", exchange, cipherType, hashType) - epubkey, done, err := ci.GenerateEKeyPair(exchange) // Generate EphemeralPubKey + epubkey, genSharedKey, err := ci.GenerateEKeyPair(exchange) // Generate EphemeralPubKey var handshake bytes.Buffer // Gather corpus to sign. 
handshake.Write(encoded) @@ -163,7 +163,7 @@ func (s *SecurePipe) handshake() error { theirHandshake.Write(encoded) theirHandshake.Write(exchangeResp.GetEpubkey()) - // u.POut("Remote Peer Identified as %s\n", s.remote.ID.Pretty()) + // u.POut("Remote Peer Identified as %s\n", s.remote) ok, err := s.remote.PubKey.Verify(theirHandshake.Bytes(), exchangeResp.GetSignature()) if err != nil { return err @@ -173,7 +173,7 @@ func (s *SecurePipe) handshake() error { return errors.New("Bad signature!") } - secret, err := done(exchangeResp.GetEpubkey()) + secret, err := genSharedKey(exchangeResp.GetEpubkey()) if err != nil { return err } @@ -205,7 +205,7 @@ func (s *SecurePipe) handshake() error { return errors.New("Negotiation failed.") } - u.DOut("[%s] handshake: Got node id: %s\n", s.local.ID.Pretty(), s.remote.ID.Pretty()) + u.DOut("%s handshake: Got node id: %s\n", s.local, s.remote) return nil } @@ -229,10 +229,11 @@ func (s *SecurePipe) handleSecureIn(hashType string, tIV, tCKey, tMKey []byte) { for { data, ok := <-s.insecure.In if !ok { + close(s.Duplex.In) return } - // u.DOut("[peer %s] secure in [from = %s] %d\n", s.local.ID.Pretty(), s.remote.ID.Pretty(), len(data)) + // u.DOut("[peer %s] secure in [from = %s] %d\n", s.local, s.remote, len(data)) if len(data) <= macSize { continue } @@ -280,7 +281,7 @@ func (s *SecurePipe) handleSecureOut(hashType string, mIV, mCKey, mMKey []byte) copy(buff[len(data):], myMac.Sum(nil)) myMac.Reset() - // u.DOut("[peer %s] secure out [to = %s] %d\n", s.local.ID.Pretty(), s.remote.ID.Pretty(), len(buff)) + // u.DOut("[peer %s] secure out [to = %s] %d\n", s.local, s.remote, len(buff)) s.insecure.Out <- buff } } @@ -291,25 +292,15 @@ func IDFromPubKey(pk ci.PubKey) (peer.ID, error) { if err != nil { return nil, err } - hash, err := u.Hash(b) - if err != nil { - return nil, err - } + hash := u.Hash(b) return peer.ID(hash), nil } // Determines which algorithm to use. 
Note: f(a, b) = f(b, a) func selectBest(myPrefs, theirPrefs string) (string, error) { // Person with greatest hash gets first choice. - myHash, err := u.Hash([]byte(myPrefs)) - if err != nil { - return "", err - } - - theirHash, err := u.Hash([]byte(theirPrefs)) - if err != nil { - return "", err - } + myHash := u.Hash([]byte(myPrefs)) + theirHash := u.Hash([]byte(theirPrefs)) cmp := bytes.Compare(myHash, theirHash) var firstChoiceArr, secChoiceArr []string @@ -367,7 +358,7 @@ func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (*peer.Peer, error) // let's verify ID if !npeer.ID.Equal(rid) { e := "Expected peer.ID does not match sent pubkey's hash: %v - %v" - return nil, fmt.Errorf(e, npeer.ID.Pretty(), rid.Pretty()) + return nil, fmt.Errorf(e, npeer, rid) } if npeer.PubKey == nil { @@ -380,7 +371,7 @@ func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (*peer.Peer, error) // this shouldn't ever happen, given we hashed, etc, but it could mean // expected code (or protocol) invariants violated. 
if !npeer.PubKey.Equals(rpk) { - return nil, fmt.Errorf("WARNING: PubKey mismatch: %v", npeer.ID.Pretty()) + return nil, fmt.Errorf("WARNING: PubKey mismatch: %v", npeer) } return npeer, nil } diff --git a/crypto/spipe/pipe.go b/crypto/spipe/pipe.go index 8d0db0d5dbe..7f9ccc30f62 100644 --- a/crypto/spipe/pipe.go +++ b/crypto/spipe/pipe.go @@ -82,6 +82,5 @@ func (s *SecurePipe) Close() error { s.cancel() s.cancel = nil - close(s.In) return nil } diff --git a/daemon/daemon.go b/daemon/daemon.go index 76dfa06a060..45ac49e681d 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -9,7 +9,6 @@ import ( "path" "sync" - logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging" core "github.com/jbenet/go-ipfs/core" "github.com/jbenet/go-ipfs/core/commands" u "github.com/jbenet/go-ipfs/util" @@ -18,7 +17,7 @@ import ( ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ) -var log = logging.MustGetLogger("daemon") +var log = u.Logger("daemon") // LockFile is the filename of the daemon lock, relative to config dir const LockFile = "daemon.lock" @@ -40,7 +39,7 @@ type Command struct { Opts map[string]interface{} } -func NewDaemonListener(ipfsnode *core.IpfsNode, addr *ma.Multiaddr, confdir string) (*DaemonListener, error) { +func NewDaemonListener(ipfsnode *core.IpfsNode, addr ma.Multiaddr, confdir string) (*DaemonListener, error) { var err error confdir, err = u.TildeExpansion(confdir) if err != nil { @@ -52,7 +51,7 @@ func NewDaemonListener(ipfsnode *core.IpfsNode, addr *ma.Multiaddr, confdir stri return nil, err } - network, host, err := addr.DialArgs() + network, host, err := ma.DialArgs(addr) if err != nil { return nil, err } @@ -63,7 +62,7 @@ func NewDaemonListener(ipfsnode *core.IpfsNode, addr *ma.Multiaddr, confdir stri return nil, err } - _, err = ofi.Write([]byte(host)) + _, err = ofi.Write([]byte(addr.String())) if err != nil { log.Warning("Could not write to rpcaddress file: %s", err) return nil, err @@ 
-133,6 +132,10 @@ func (dl *DaemonListener) handleConnection(conn net.Conn) { err = commands.Ls(dl.node, command.Args, command.Opts, conn) case "pin": err = commands.Pin(dl.node, command.Args, command.Opts, conn) + case "publish": + err = commands.Publish(dl.node, command.Args, command.Opts, conn) + case "resolve": + err = commands.Resolve(dl.node, command.Args, command.Opts, conn) default: err = fmt.Errorf("Invalid Command: '%s'", command.Command) } diff --git a/daemon/daemon_client.go b/daemon/daemon_client.go index 5ca0e3a3797..478b7fd7d33 100644 --- a/daemon/daemon_client.go +++ b/daemon/daemon_client.go @@ -60,19 +60,20 @@ func SendCommand(command *Command, confdir string) error { return ErrDaemonNotRunning } - log.Info("Daemon is running! %s", err) + log.Info("Daemon is running! [reason = %s]", err) server, err := getDaemonAddr(confdir) if err != nil { return err } + log.Info("Daemon address: %s", server) maddr, err := ma.NewMultiaddr(server) if err != nil { return err } - network, host, err := maddr.DialArgs() + network, host, err := ma.DialArgs(maddr) conn, err := net.Dial(network, host) if err != nil { diff --git a/exchange/bitswap/bitswap.go b/exchange/bitswap/bitswap.go index 4ba9e179fa4..20f9d234c06 100644 --- a/exchange/bitswap/bitswap.go +++ b/exchange/bitswap/bitswap.go @@ -61,7 +61,7 @@ type bitswap struct { // // TODO ensure only one active request per key func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) { - u.DOut("Get Block %v\n", k.Pretty()) + u.DOut("Get Block %v\n", k) ctx, cancelFunc := context.WithCancel(parent) bs.wantlist.Add(k) @@ -110,7 +110,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) // HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. 
func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { - u.DOut("Has Block %v\n", blk.Key().Pretty()) + u.DOut("Has Block %v\n", blk.Key()) bs.wantlist.Remove(blk.Key()) bs.sendToPeersThatWant(ctx, blk) return bs.routing.Provide(ctx, blk.Key()) @@ -119,7 +119,7 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error { // TODO(brian): handle errors func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) ( *peer.Peer, bsmsg.BitSwapMessage) { - u.DOut("ReceiveMessage from %v\n", p.Key().Pretty()) + u.DOut("ReceiveMessage from %v\n", p.Key()) if p == nil { // TODO propagate the error upward @@ -134,7 +134,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bs for _, block := range incoming.Blocks() { // TODO verify blocks? - if err := bs.blockstore.Put(block); err != nil { + if err := bs.blockstore.Put(&block); err != nil { continue // FIXME(brian): err ignored } go bs.notifications.Publish(block) @@ -173,10 +173,10 @@ func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessag } func (bs *bitswap) sendToPeersThatWant(ctx context.Context, block blocks.Block) { - u.DOut("Sending %v to peers that want it\n", block.Key().Pretty()) + u.DOut("Sending %v to peers that want it\n", block.Key()) for _, p := range bs.strategy.Peers() { if bs.strategy.BlockIsWantedByPeer(block.Key(), p) { - u.DOut("%v wants %v\n", p.Key().Pretty(), block.Key().Pretty()) + u.DOut("%v wants %v\n", p, block.Key()) if bs.strategy.ShouldSendBlockToPeer(block.Key(), p) { message := bsmsg.New() message.AppendBlock(block) diff --git a/exchange/bitswap/bitswap_test.go b/exchange/bitswap/bitswap_test.go index a9fc11f8210..d1c92d8d0fe 100644 --- a/exchange/bitswap/bitswap_test.go +++ b/exchange/bitswap/bitswap_test.go @@ -9,7 +9,7 @@ import ( context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" - "github.com/jbenet/go-ipfs/blocks" + blocks "github.com/jbenet/go-ipfs/blocks" bstore "github.com/jbenet/go-ipfs/blockstore" exchange "github.com/jbenet/go-ipfs/exchange" notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications" @@ -18,7 +18,6 @@ import ( peer "github.com/jbenet/go-ipfs/peer" mock "github.com/jbenet/go-ipfs/routing/mock" util "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestGetBlockTimeout(t *testing.T) { @@ -30,7 +29,7 @@ func TestGetBlockTimeout(t *testing.T) { self := g.Next() ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond) - block := testutil.NewBlockOrFail(t, "block") + block := blocks.NewBlock([]byte("block")) _, err := self.exchange.Block(ctx, block.Key()) if err != context.DeadlineExceeded { @@ -44,7 +43,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { rs := mock.VirtualRoutingServer() g := NewSessionGenerator(net, rs) - block := testutil.NewBlockOrFail(t, "block") + block := blocks.NewBlock([]byte("block")) rs.Announce(&peer.Peer{}, block.Key()) // but not on network solo := g.Next() @@ -63,7 +62,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() - block := testutil.NewBlockOrFail(t, "block") + block := blocks.NewBlock([]byte("block")) g := NewSessionGenerator(net, rs) hasBlock := g.Next() @@ -71,7 +70,7 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { if err := hasBlock.blockstore.Put(block); err != nil { t.Fatal(err) } - if err := hasBlock.exchange.HasBlock(context.Background(), block); err != nil { + if err := hasBlock.exchange.HasBlock(context.Background(), *block); err != nil { t.Fatal(err) } @@ -93,7 +92,7 @@ func TestSwarm(t *testing.T) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) - bg := NewBlockGenerator(t) + bg 
:= NewBlockGenerator() t.Log("Create a ton of instances, and just a few blocks") @@ -107,7 +106,7 @@ func TestSwarm(t *testing.T) { first := instances[0] for _, b := range blocks { - first.blockstore.Put(*b) + first.blockstore.Put(b) first.exchange.HasBlock(context.Background(), *b) rs.Announce(first.peer, b.Key()) } @@ -154,55 +153,55 @@ func TestSendToWantingPeer(t *testing.T) { net := tn.VirtualNetwork() rs := mock.VirtualRoutingServer() sg := NewSessionGenerator(net, rs) - bg := NewBlockGenerator(t) + bg := NewBlockGenerator() me := sg.Next() w := sg.Next() o := sg.Next() - t.Logf("Session %v\n", me.peer.Key().Pretty()) - t.Logf("Session %v\n", w.peer.Key().Pretty()) - t.Logf("Session %v\n", o.peer.Key().Pretty()) + t.Logf("Session %v\n", me.peer) + t.Logf("Session %v\n", w.peer) + t.Logf("Session %v\n", o.peer) alpha := bg.Next() const timeout = 1 * time.Millisecond // FIXME don't depend on time - t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("Peer %v attempts to get %v. 
NB: not available\n", w.peer, alpha.Key()) ctx, _ := context.WithTimeout(context.Background(), timeout) _, err := w.exchange.Block(ctx, alpha.Key()) if err == nil { - t.Fatalf("Expected %v to NOT be available", alpha.Key().Pretty()) + t.Fatalf("Expected %v to NOT be available", alpha.Key()) } beta := bg.Next() - t.Logf("Peer %v announes availability of %v\n", w.peer.Key().Pretty(), beta.Key().Pretty()) + t.Logf("Peer %v announes availability of %v\n", w.peer, beta.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := w.blockstore.Put(beta); err != nil { + if err := w.blockstore.Put(&beta); err != nil { t.Fatal(err) } w.exchange.HasBlock(ctx, beta) - t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer.Key().Pretty(), beta.Key().Pretty(), w.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer, beta.Key(), w.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) if _, err := me.exchange.Block(ctx, beta.Key()); err != nil { t.Fatal(err) } - t.Logf("%v announces availability of %v\n", o.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v announces availability of %v\n", o.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) - if err := o.blockstore.Put(alpha); err != nil { + if err := o.blockstore.Put(&alpha); err != nil { t.Fatal(err) } o.exchange.HasBlock(ctx, alpha) - t.Logf("%v requests %v\n", me.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v requests %v\n", me.peer, alpha.Key()) ctx, _ = context.WithTimeout(context.Background(), timeout) if _, err := me.exchange.Block(ctx, alpha.Key()); err != nil { t.Fatal(err) } - t.Logf("%v should now have %v\n", w.peer.Key().Pretty(), alpha.Key().Pretty()) + t.Logf("%v should now have %v\n", w.peer, alpha.Key()) block, err := w.blockstore.Get(alpha.Key()) if err != nil { t.Fatal("Should not have received an error") @@ -212,20 +211,17 @@ func TestSendToWantingPeer(t 
*testing.T) { } } -func NewBlockGenerator(t *testing.T) BlockGenerator { - return BlockGenerator{ - T: t, - } +func NewBlockGenerator() BlockGenerator { + return BlockGenerator{} } type BlockGenerator struct { - *testing.T // b/c block generation can fail - seq int + seq int } func (bg *BlockGenerator) Next() blocks.Block { bg.seq++ - return testutil.NewBlockOrFail(bg.T, string(bg.seq)) + return *blocks.NewBlock([]byte(string(bg.seq))) } func (bg *BlockGenerator) Blocks(n int) []*blocks.Block { diff --git a/exchange/bitswap/message/message.go b/exchange/bitswap/message/message.go index 22258e17fe9..a724f7cc752 100644 --- a/exchange/bitswap/message/message.go +++ b/exchange/bitswap/message/message.go @@ -32,19 +32,16 @@ func New() *message { return new(message) } -func newMessageFromProto(pbm PBMessage) (BitSwapMessage, error) { +func newMessageFromProto(pbm PBMessage) BitSwapMessage { m := New() for _, s := range pbm.GetWantlist() { m.AppendWanted(u.Key(s)) } for _, d := range pbm.GetBlocks() { - b, err := blocks.NewBlock(d) - if err != nil { - return nil, err - } + b := blocks.NewBlock(d) m.AppendBlock(*b) } - return m, nil + return m } // TODO(brian): convert these into keys @@ -70,10 +67,7 @@ func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { if err := proto.Unmarshal(nmsg.Data(), pb); err != nil { return nil, err } - m, err := newMessageFromProto(*pb) - if err != nil { - return nil, err - } + m := newMessageFromProto(*pb) return m, nil } diff --git a/exchange/bitswap/message/message_test.go b/exchange/bitswap/message/message_test.go index 9590f1ff1a3..b5954eba8ed 100644 --- a/exchange/bitswap/message/message_test.go +++ b/exchange/bitswap/message/message_test.go @@ -4,9 +4,9 @@ import ( "bytes" "testing" + "github.com/jbenet/go-ipfs/blocks" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestAppendWanted(t *testing.T) { @@ -26,10 +26,7 @@ func 
TestNewMessageFromProto(t *testing.T) { if !contains(protoMessage.Wantlist, str) { t.Fail() } - m, err := newMessageFromProto(*protoMessage) - if err != nil { - t.Fatal(err) - } + m := newMessageFromProto(*protoMessage) if !contains(m.ToProto().GetWantlist(), str) { t.Fail() } @@ -43,8 +40,8 @@ func TestAppendBlock(t *testing.T) { m := New() for _, str := range strs { - block := testutil.NewBlockOrFail(t, str) - m.AppendBlock(block) + block := blocks.NewBlock([]byte(str)) + m.AppendBlock(*block) } // assert strings are in proto message @@ -134,10 +131,10 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { func TestToAndFromNetMessage(t *testing.T) { original := New() - original.AppendBlock(testutil.NewBlockOrFail(t, "W")) - original.AppendBlock(testutil.NewBlockOrFail(t, "E")) - original.AppendBlock(testutil.NewBlockOrFail(t, "F")) - original.AppendBlock(testutil.NewBlockOrFail(t, "M")) + original.AppendBlock(*blocks.NewBlock([]byte("W"))) + original.AppendBlock(*blocks.NewBlock([]byte("E"))) + original.AppendBlock(*blocks.NewBlock([]byte("F"))) + original.AppendBlock(*blocks.NewBlock([]byte("M"))) p := &peer.Peer{ID: []byte("X")} netmsg, err := original.ToNet(p) diff --git a/exchange/bitswap/notifications/notifications_test.go b/exchange/bitswap/notifications/notifications_test.go index b12cc7d83ad..063634f61e9 100644 --- a/exchange/bitswap/notifications/notifications_test.go +++ b/exchange/bitswap/notifications/notifications_test.go @@ -6,25 +6,23 @@ import ( "time" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" - testutil "github.com/jbenet/go-ipfs/util/testutil" - blocks "github.com/jbenet/go-ipfs/blocks" ) func TestPublishSubscribe(t *testing.T) { - blockSent := testutil.NewBlockOrFail(t, "Greetings from The Interval") + blockSent := blocks.NewBlock([]byte("Greetings from The Interval")) n := New() defer n.Shutdown() ch := n.Subscribe(context.Background(), blockSent.Key()) - n.Publish(blockSent) + 
n.Publish(*blockSent) blockRecvd, ok := <-ch if !ok { t.Fail() } - assertBlocksEqual(t, blockRecvd, blockSent) + assertBlocksEqual(t, blockRecvd, *blockSent) } @@ -35,7 +33,7 @@ func TestCarryOnWhenDeadlineExpires(t *testing.T) { n := New() defer n.Shutdown() - block := testutil.NewBlockOrFail(t, "A Missed Connection") + block := blocks.NewBlock([]byte("A Missed Connection")) blockChannel := n.Subscribe(fastExpiringCtx, block.Key()) assertBlockChannelNil(t, blockChannel) diff --git a/exchange/bitswap/strategy/strategy_test.go b/exchange/bitswap/strategy/strategy_test.go index 21f293c1c25..dccc4a37465 100644 --- a/exchange/bitswap/strategy/strategy_test.go +++ b/exchange/bitswap/strategy/strategy_test.go @@ -4,9 +4,9 @@ import ( "strings" "testing" + blocks "github.com/jbenet/go-ipfs/blocks" message "github.com/jbenet/go-ipfs/exchange/bitswap/message" peer "github.com/jbenet/go-ipfs/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) type peerAndStrategist struct { @@ -30,7 +30,7 @@ func TestConsistentAccounting(t *testing.T) { m := message.New() content := []string{"this", "is", "message", "i"} - m.AppendBlock(testutil.NewBlockOrFail(t, strings.Join(content, " "))) + m.AppendBlock(*blocks.NewBlock([]byte(strings.Join(content, " ")))) sender.MessageSent(receiver.Peer, m) receiver.MessageReceived(sender.Peer, m) @@ -57,7 +57,7 @@ func TestBlockRecordedAsWantedAfterMessageReceived(t *testing.T) { beggar := newPeerAndStrategist("can't be chooser") chooser := newPeerAndStrategist("chooses JIF") - block := testutil.NewBlockOrFail(t, "data wanted by beggar") + block := blocks.NewBlock([]byte("data wanted by beggar")) messageFromBeggarToChooser := message.New() messageFromBeggarToChooser.AppendWanted(block.Key()) diff --git a/exchange/bitswap/testnet/network_test.go b/exchange/bitswap/testnet/network_test.go index 15502783eac..fbd7c88936b 100644 --- a/exchange/bitswap/testnet/network_test.go +++ b/exchange/bitswap/testnet/network_test.go @@ -5,10 +5,10 @@ import ( 
"testing" context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message" bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network" peer "github.com/jbenet/go-ipfs/peer" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestSendRequestToCooperativePeer(t *testing.T) { @@ -33,7 +33,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { // TODO test contents of incoming message m := bsmsg.New() - m.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) + m.AppendBlock(*blocks.NewBlock([]byte(expectedStr))) return from, m })) @@ -41,7 +41,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) { t.Log("Build a message and send a synchronous request to recipient") message := bsmsg.New() - message.AppendBlock(testutil.NewBlockOrFail(t, "data")) + message.AppendBlock(*blocks.NewBlock([]byte("data"))) response, err := initiator.SendRequest( context.Background(), &peer.Peer{ID: idOfRecipient}, message) if err != nil { @@ -77,7 +77,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { *peer.Peer, bsmsg.BitSwapMessage) { msgToWaiter := bsmsg.New() - msgToWaiter.AppendBlock(testutil.NewBlockOrFail(t, expectedStr)) + msgToWaiter.AppendBlock(*blocks.NewBlock([]byte(expectedStr))) return fromWaiter, msgToWaiter })) @@ -105,7 +105,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { })) messageSentAsync := bsmsg.New() - messageSentAsync.AppendBlock(testutil.NewBlockOrFail(t, "data")) + messageSentAsync.AppendBlock(*blocks.NewBlock([]byte("data"))) errSending := waiter.SendMessage( context.Background(), &peer.Peer{ID: idOfResponder}, messageSentAsync) if errSending != nil { diff --git a/exchange/offline/offline_test.go b/exchange/offline/offline_test.go index 26821f2c8a3..b759a61ca37 100644 --- a/exchange/offline/offline_test.go +++ b/exchange/offline/offline_test.go @@ -5,8 +5,8 @@ import ( context 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" - testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestBlockReturnsErr(t *testing.T) { @@ -20,8 +20,8 @@ func TestBlockReturnsErr(t *testing.T) { func TestHasBlockReturnsNil(t *testing.T) { off := NewOfflineExchange() - block := testutil.NewBlockOrFail(t, "data") - err := off.HasBlock(context.Background(), block) + block := blocks.NewBlock([]byte("data")) + err := off.HasBlock(context.Background(), *block) if err != nil { t.Fatal("") } diff --git a/fuse/ipns/ipns_test.go b/fuse/ipns/ipns_test.go new file mode 100644 index 00000000000..0d3ed7a842e --- /dev/null +++ b/fuse/ipns/ipns_test.go @@ -0,0 +1,337 @@ +package ipns + +import ( + "bytes" + "crypto/rand" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + "time" + + fstest "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs/fstestutil" + core "github.com/jbenet/go-ipfs/core" + u "github.com/jbenet/go-ipfs/util" +) + +func maybeSkipFuseTests(t *testing.T) bool { + v := "TEST_NO_FUSE" + n := strings.ToLower(os.Getenv(v)) + skip := n != "" && n != "false" && n != "f" + + if skip { + t.Skipf("Skipping FUSE tests (%s=%s)", v, n) + } + return skip +} + +func randBytes(size int) []byte { + b := make([]byte, size) + rand.Read(b) + return b +} + +func writeFile(t *testing.T, size int, path string) []byte { + return writeFileData(t, randBytes(size), path) +} + +func writeFileData(t *testing.T, data []byte, path string) []byte { + fi, err := os.Create(path) + if err != nil { + t.Fatal(err) + } + + n, err := fi.Write(data) + if err != nil { + t.Fatal(err) + } + + if n != len(data) { + t.Fatal("Didnt write proper amount!") + } + + err = fi.Close() + if err != nil { + t.Fatal(err) + } + + return data +} + +func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.Mount) { + maybeSkipFuseTests(t) + + var err error + 
if node == nil { + node, err = core.NewMockNode() + if err != nil { + t.Fatal(err) + } + } + + fs, err := NewIpns(node, "") + if err != nil { + t.Fatal(err) + } + mnt, err := fstest.MountedT(t, fs) + if err != nil { + t.Fatal(err) + } + + return node, mnt +} + +// Test writing a file and reading it back +func TestIpnsBasicIO(t *testing.T) { + _, mnt := setupIpnsTest(t, nil) + defer mnt.Close() + + fname := mnt.Dir + "/local/testfile" + data := writeFile(t, 12345, fname) + + rbuf, err := ioutil.ReadFile(fname) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(rbuf, data) { + t.Fatal("Incorrect Read!") + } +} + +// Test to make sure file changes persist over mounts of ipns +func TestFilePersistence(t *testing.T) { + node, mnt := setupIpnsTest(t, nil) + + fname := "/local/atestfile" + data := writeFile(t, 127, mnt.Dir+fname) + + // Wait for publish: TODO: make publish happen faster in tests + time.Sleep(time.Millisecond * 40) + + mnt.Close() + + node, mnt = setupIpnsTest(t, node) + defer mnt.Close() + + rbuf, err := ioutil.ReadFile(mnt.Dir + fname) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(rbuf, data) { + t.Fatalf("File data changed between mounts! 
sizes differ: %d != %d", len(data), len(rbuf)) + } +} + +// Test to make sure the filesystem reports file sizes correctly +func TestFileSizeReporting(t *testing.T) { + _, mnt := setupIpnsTest(t, nil) + defer mnt.Close() + + fname := mnt.Dir + "/local/sizecheck" + data := writeFile(t, 5555, fname) + + finfo, err := os.Stat(fname) + if err != nil { + t.Fatal(err) + } + + if finfo.Size() != int64(len(data)) { + t.Fatal("Read incorrect size from stat!") + } +} + +// Test to make sure you cant create multiple entries with the same name +func TestDoubleEntryFailure(t *testing.T) { + _, mnt := setupIpnsTest(t, nil) + defer mnt.Close() + + dname := mnt.Dir + "/local/thisisadir" + err := os.Mkdir(dname, 0777) + if err != nil { + t.Fatal(err) + } + + err = os.Mkdir(dname, 0777) + if err == nil { + t.Fatal("Should have gotten error one creating new directory.") + } +} + +func TestAppendFile(t *testing.T) { + _, mnt := setupIpnsTest(t, nil) + defer mnt.Close() + + fname := mnt.Dir + "/local/file" + data := writeFile(t, 1300, fname) + + fi, err := os.OpenFile(fname, os.O_RDWR|os.O_APPEND, 0666) + if err != nil { + t.Fatal(err) + } + + nudata := randBytes(500) + + n, err := fi.Write(nudata) + if err != nil { + t.Fatal(err) + } + err = fi.Close() + if err != nil { + t.Fatal(err) + } + + if n != len(nudata) { + t.Fatal("Failed to write enough bytes.") + } + + data = append(data, nudata...) + + rbuf, err := ioutil.ReadFile(fname) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(rbuf, data) { + t.Fatal("Data inconsistent!") + } +} + +func TestFastRepublish(t *testing.T) { + + // make timeout noticeable. 
+ osrt := shortRepublishTimeout + shortRepublishTimeout = time.Millisecond * 100 + + olrt := longRepublishTimeout + longRepublishTimeout = time.Second + + node, mnt := setupIpnsTest(t, nil) + + h, err := node.Identity.PrivKey.GetPublic().Hash() + if err != nil { + t.Fatal(err) + } + pubkeyHash := u.Key(h).Pretty() + + // set them back + defer func() { + shortRepublishTimeout = osrt + longRepublishTimeout = olrt + mnt.Close() + }() + + closed := make(chan struct{}) + dataA := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") + dataB := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + + fname := mnt.Dir + "/local/file" + + // get first resolved hash + log.Debug("publishing first hash") + writeFileData(t, dataA, fname) // random + <-time.After(shortRepublishTimeout * 11 / 10) + log.Debug("resolving first hash") + resolvedHash, err := node.Namesys.Resolve(pubkeyHash) + if err != nil { + t.Fatal("resolve err:", pubkeyHash, err) + } + + // constantly keep writing to the file + go func() { + for { + select { + case <-closed: + return + + case <-time.After(shortRepublishTimeout * 8 / 10): + writeFileData(t, dataB, fname) + } + } + }() + + hasPublished := func() bool { + res, err := node.Namesys.Resolve(pubkeyHash) + if err != nil { + t.Fatal("resolve err: %v", err) + } + return res != resolvedHash + } + + // test things + + // at this point, should not have written dataA and not have written dataB + rbuf, err := ioutil.ReadFile(fname) + if err != nil || !bytes.Equal(rbuf, dataA) { + t.Fatal("Data inconsistent! %v %v", err, string(rbuf)) + } + + if hasPublished() { + t.Fatal("published (wrote)") + } + + <-time.After(shortRepublishTimeout * 11 / 10) + + // at this point, should have written written dataB, but not published it + rbuf, err = ioutil.ReadFile(fname) + if err != nil || !bytes.Equal(rbuf, dataB) { + t.Fatal("Data inconsistent! 
%v %v", err, string(rbuf)) + } + + if hasPublished() { + t.Fatal("published (wrote)") + } + + <-time.After(longRepublishTimeout * 11 / 10) + + // at this point, should have written written dataB, and published it + rbuf, err = ioutil.ReadFile(fname) + if err != nil || !bytes.Equal(rbuf, dataB) { + t.Fatal("Data inconsistent! %v %v", err, string(rbuf)) + } + + if !hasPublished() { + t.Fatal("not published") + } + + close(closed) +} + +// Test writing a medium sized file one byte at a time +func TestMultiWrite(t *testing.T) { + if runtime.GOOS == "darwin" { + link := "https://github.com/jbenet/go-ipfs/issues/147" + t.Skipf("Skipping as is broken in OSX. See %s", link) + } + + _, mnt := setupIpnsTest(t, nil) + defer mnt.Close() + + fpath := mnt.Dir + "/local/file" + fi, err := os.Create(fpath) + if err != nil { + t.Fatal(err) + } + + data := randBytes(1001) + for i := 0; i < len(data); i++ { + n, err := fi.Write(data[i : i+1]) + if err != nil { + t.Fatal(err) + } + if n != 1 { + t.Fatal("Somehow wrote the wrong number of bytes! 
(n != 1)") + } + } + fi.Close() + + rbuf, err := ioutil.ReadFile(fpath) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(rbuf, data) { + t.Fatal("File on disk did not match bytes written") + } +} diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go new file mode 100644 index 00000000000..3f56dbb6821 --- /dev/null +++ b/fuse/ipns/ipns_unix.go @@ -0,0 +1,568 @@ +package ipns + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + + "github.com/jbenet/go-ipfs/core" + ci "github.com/jbenet/go-ipfs/crypto" + "github.com/jbenet/go-ipfs/importer/chunk" + mdag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" + uio "github.com/jbenet/go-ipfs/unixfs/io" + u "github.com/jbenet/go-ipfs/util" +) + +var log = u.Logger("ipns") + +var ( + shortRepublishTimeout = time.Millisecond * 5 + longRepublishTimeout = time.Millisecond * 500 +) + +// FileSystem is the readwrite IPNS Fuse Filesystem. +type FileSystem struct { + Ipfs *core.IpfsNode + RootNode *Root +} + +// NewFileSystem constructs new fs using given core.IpfsNode instance. 
+func NewIpns(ipfs *core.IpfsNode, ipfspath string) (*FileSystem, error) { + root, err := CreateRoot(ipfs, []ci.PrivKey{ipfs.Identity.PrivKey}, ipfspath) + if err != nil { + return nil, err + } + return &FileSystem{Ipfs: ipfs, RootNode: root}, nil +} + +func CreateRoot(n *core.IpfsNode, keys []ci.PrivKey, ipfsroot string) (*Root, error) { + root := new(Root) + root.LocalDirs = make(map[string]*Node) + root.Ipfs = n + abspath, err := filepath.Abs(ipfsroot) + if err != nil { + return nil, err + } + root.IpfsRoot = abspath + + root.Keys = keys + + if len(keys) == 0 { + log.Warning("No keys given for ipns root creation") + } else { + k := keys[0] + pub := k.GetPublic() + hash, err := pub.Hash() + if err != nil { + log.Error("Read Root Error: %s", err) + return nil, err + } + root.LocalLink = &Link{u.Key(hash).Pretty()} + } + + for _, k := range keys { + hash, err := k.GetPublic().Hash() + if err != nil { + log.Error("failed to hash public key.") + continue + } + name := u.Key(hash).Pretty() + nd := new(Node) + nd.Ipfs = n + nd.key = k + nd.repub = NewRepublisher(nd, shortRepublishTimeout, longRepublishTimeout) + + go nd.repub.Run() + + pointsTo, err := n.Namesys.Resolve(name) + if err != nil { + log.Warning("Could not resolve value for local ipns entry, providing empty dir") + nd.Nd = &mdag.Node{Data: ft.FolderPBData()} + root.LocalDirs[name] = nd + continue + } + + if !u.IsValidHash(pointsTo) { + log.Critical("Got back bad data from namesys resolve! [%s]", pointsTo) + return nil, nil + } + + node, err := n.Resolver.ResolvePath(pointsTo) + if err != nil { + log.Warning("Failed to resolve value from ipns entry in ipfs") + continue + } + + nd.Nd = node + root.LocalDirs[name] = nd + } + + return root, nil +} + +// Root constructs the Root of the filesystem, a Root object. +func (f FileSystem) Root() (fs.Node, fuse.Error) { + return f.RootNode, nil +} + +// Root is the root object of the filesystem tree. 
+type Root struct { + Ipfs *core.IpfsNode + Keys []ci.PrivKey + + // Used for symlinking into ipfs + IpfsRoot string + LocalDirs map[string]*Node + + LocalLink *Link +} + +// Attr returns file attributes. +func (*Root) Attr() fuse.Attr { + return fuse.Attr{Mode: os.ModeDir | 0111} // -rw+x +} + +// Lookup performs a lookup under this node. +func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + log.Debug("ipns: Root Lookup: '%s'", name) + switch name { + case "mach_kernel", ".hidden", "._.": + // Just quiet some log noise on OS X. + return nil, fuse.ENOENT + } + + if name == "local" { + if s.LocalLink == nil { + return nil, fuse.ENOENT + } + return s.LocalLink, nil + } + + nd, ok := s.LocalDirs[name] + if ok { + return nd, nil + } + + log.Debug("ipns: Falling back to resolution for [%s].", name) + resolved, err := s.Ipfs.Namesys.Resolve(name) + if err != nil { + log.Warning("ipns: namesys resolve error: %s", err) + return nil, fuse.ENOENT + } + + return &Link{s.IpfsRoot + "/" + resolved}, nil +} + +// ReadDir reads a particular directory. Disallowed for root. +func (r *Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + log.Debug("Read Root.") + listing := []fuse.Dirent{ + fuse.Dirent{ + Name: "local", + Type: fuse.DT_Link, + }, + } + for _, k := range r.Keys { + pub := k.GetPublic() + hash, err := pub.Hash() + if err != nil { + log.Error("Read Root Error: %s", err) + continue + } + ent := fuse.Dirent{ + Name: u.Key(hash).Pretty(), + Type: fuse.DT_Dir, + } + listing = append(listing, ent) + } + return listing, nil +} + +// Node is the core object representing a filesystem tree node. +type Node struct { + root *Root + nsRoot *Node + parent *Node + + repub *Republisher + + // This nodes name in its parent dir. 
+ // NOTE: this strategy wont work well if we allow hard links + // (im all for murdering the thought of hard links) + name string + + // Private keys held by nodes at the root of a keyspace + // WARNING(security): the PrivKey interface is currently insecure + // (holds the raw key). It will be secured later. + key ci.PrivKey + + Ipfs *core.IpfsNode + Nd *mdag.Node + dagMod *uio.DagModifier + cached *ft.PBData +} + +func (s *Node) loadData() error { + s.cached = new(ft.PBData) + return proto.Unmarshal(s.Nd.Data, s.cached) +} + +// Attr returns the attributes of a given node. +func (s *Node) Attr() fuse.Attr { + if s.cached == nil { + err := s.loadData() + if err != nil { + log.Error("Error loading PBData for file: '%s'", s.name) + } + } + switch s.cached.GetType() { + case ft.PBData_Directory: + return fuse.Attr{Mode: os.ModeDir | 0555} + case ft.PBData_File, ft.PBData_Raw: + size, err := ft.DataSize(s.Nd.Data) + if err != nil { + log.Error("Error getting size of file: %s", err) + size = 0 + } + return fuse.Attr{ + Mode: 0666, + Size: size, + Blocks: uint64(len(s.Nd.Links)), + } + default: + log.Error("Invalid data type.") + return fuse.Attr{} + } +} + +// Lookup performs a lookup under this node. +func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { + log.Debug("ipns: node[%s] Lookup '%s'", s.name, name) + nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name}) + if err != nil { + // todo: make this error more versatile. 
+ return nil, fuse.ENOENT + } + + return s.makeChild(name, nd), nil +} + +func (n *Node) makeChild(name string, node *mdag.Node) *Node { + child := &Node{ + Ipfs: n.Ipfs, + Nd: node, + name: name, + nsRoot: n.nsRoot, + parent: n, + } + + // Always ensure that each child knows where the root is + if n.nsRoot == nil { + child.nsRoot = n + } else { + child.nsRoot = n.nsRoot + } + + return child +} + +// ReadDir reads the link structure as directory entries +func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { + log.Debug("Node ReadDir") + entries := make([]fuse.Dirent, len(s.Nd.Links)) + for i, link := range s.Nd.Links { + n := link.Name + if len(n) == 0 { + n = link.Hash.B58String() + } + entries[i] = fuse.Dirent{Name: n, Type: fuse.DT_File} + } + + if len(entries) > 0 { + return entries, nil + } + return nil, fuse.ENOENT +} + +// ReadAll reads the object data as file data +func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) { + log.Debug("ipns: ReadAll [%s]", s.name) + r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG) + if err != nil { + return nil, err + } + // this is a terrible function... 'ReadAll'? + // what if i have a 6TB file? GG RAM. 
+ b, err := ioutil.ReadAll(r) + if err != nil { + log.Error("[%s] Readall error: %s", s.name, err) + return nil, err + } + return b, nil +} + +func (n *Node) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error { + log.Debug("ipns: Node Write [%s]: flags = %s, offset = %d, size = %d", n.name, req.Flags.String(), req.Offset, len(req.Data)) + + if n.dagMod == nil { + // Create a DagModifier to allow us to change the existing dag node + dmod, err := uio.NewDagModifier(n.Nd, n.Ipfs.DAG, chunk.DefaultSplitter) + if err != nil { + log.Error("Error creating dag modifier: %s", err) + return err + } + n.dagMod = dmod + } + wrote, err := n.dagMod.WriteAt(req.Data, uint64(req.Offset)) + if err != nil { + return err + } + resp.Size = wrote + return nil +} + +func (n *Node) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error { + log.Debug("Got flush request [%s]!", n.name) + + // If a write has happened + if n.dagMod != nil { + newNode, err := n.dagMod.GetNode() + if err != nil { + log.Error("Error getting dag node from dagMod: %s", err) + return err + } + + if n.parent != nil { + log.Debug("updating self in parent!") + err := n.parent.update(n.name, newNode) + if err != nil { + log.Critical("error in updating ipns dag tree: %s", err) + // return fuse.ETHISISPRETTYBAD + return err + } + } + n.Nd = newNode + + /*/TEMP + dr, err := mdag.NewDagReader(n.Nd, n.Ipfs.DAG) + if err != nil { + log.Critical("Verification read failed.") + } + b, err := ioutil.ReadAll(dr) + if err != nil { + log.Critical("Verification read failed.") + } + fmt.Println("VERIFICATION READ") + fmt.Printf("READ %d BYTES\n", len(b)) + fmt.Println(string(b)) + fmt.Println(b) + //*/ + + n.dagMod = nil + + n.wasChanged() + } + return nil +} + +// Signal that a node in this tree was changed so the root can republish +func (n *Node) wasChanged() { + root := n.nsRoot + if root == nil { + root = n + } + + root.repub.Publish <- struct{}{} +} + +func (n *Node) republishRoot() error { + 
log.Debug("Republish root") + + // We should already be the root, this is just a sanity check + var root *Node + if n.nsRoot != nil { + root = n.nsRoot + } else { + root = n + } + + // Add any nodes that may be new to the DAG service + err := n.Ipfs.DAG.AddRecursive(root.Nd) + if err != nil { + log.Critical("ipns: Dag Add Error: %s", err) + return err + } + + ndkey, err := root.Nd.Key() + if err != nil { + log.Error("getKey error: %s", err) + return err + } + log.Debug("Publishing changes!") + + err = n.Ipfs.Namesys.Publish(root.key, ndkey.Pretty()) + if err != nil { + log.Error("ipns: Publish Failed: %s", err) + return err + } + return nil +} + +func (n *Node) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error { + log.Debug("Got fsync request!") + return nil +} + +func (n *Node) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) { + log.Debug("Got mkdir request!") + dagnd := &mdag.Node{Data: ft.FolderPBData()} + nnode := n.Nd.Copy() + nnode.AddNodeLink(req.Name, dagnd) + + child := &Node{ + Ipfs: n.Ipfs, + Nd: dagnd, + name: req.Name, + } + + if n.nsRoot == nil { + child.nsRoot = n + } else { + child.nsRoot = n.nsRoot + } + + if n.parent != nil { + err := n.parent.update(n.name, nnode) + if err != nil { + log.Critical("Error updating node: %s", err) + return nil, err + } + } + n.Nd = nnode + + n.wasChanged() + + return child, nil +} + +func (n *Node) Open(req *fuse.OpenRequest, resp *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) { + //log.Debug("[%s] Received open request! 
flags = %s", n.name, req.Flags.String()) + //TODO: check open flags and truncate if necessary + if req.Flags&fuse.OpenTruncate != 0 { + log.Warning("Need to truncate file!") + n.cached = nil + n.Nd = &mdag.Node{Data: ft.FilePBData(nil, 0)} + } else if req.Flags&fuse.OpenAppend != 0 { + log.Warning("Need to append to file!") + } + return n, nil +} + +func (n *Node) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error) { + log.Debug("Got mknod request!") + return nil, nil +} + +func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) { + log.Debug("Got create request: %s", req.Name) + + // New 'empty' file + nd := &mdag.Node{Data: ft.FilePBData(nil, 0)} + child := n.makeChild(req.Name, nd) + + nnode := n.Nd.Copy() + + err := nnode.AddNodeLink(req.Name, nd) + if err != nil { + log.Error("Error adding child to node: %s", err) + return nil, nil, err + } + if n.parent != nil { + err := n.parent.update(n.name, nnode) + if err != nil { + log.Critical("Error updating node: %s", err) + // Can we panic, please? 
+ return nil, nil, err + } + } + n.Nd = nnode + n.wasChanged() + + return child, child, nil +} + +func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error { + log.Debug("[%s] Got Remove request: %s", n.name, req.Name) + nnode := n.Nd.Copy() + err := nnode.RemoveNodeLink(req.Name) + if err != nil { + log.Error("Remove: No such file.") + return fuse.ENOENT + } + + if n.parent != nil { + err := n.parent.update(n.name, nnode) + if err != nil { + log.Critical("Error updating node: %s", err) + return err + } + } + n.Nd = nnode + n.wasChanged() + return nil +} + +func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error { + log.Debug("Got Rename request '%s' -> '%s'", req.OldName, req.NewName) + var mdn *mdag.Node + for _, l := range n.Nd.Links { + if l.Name == req.OldName { + mdn = l.Node + } + } + if mdn == nil { + log.Critical("nil Link found on rename!") + return fuse.ENOENT + } + n.Nd.RemoveNodeLink(req.OldName) + + switch newDir := newDir.(type) { + case *Node: + err := newDir.Nd.AddNodeLink(req.NewName, mdn) + if err != nil { + log.Error("Error adding node to new dir on rename: %s", err) + return err + } + default: + log.Critical("Unknown node type for rename target dir!") + return errors.New("Unknown fs node type!") + } + return nil +} + +// Updates the child of this node, specified by name to the given newnode +func (n *Node) update(name string, newnode *mdag.Node) error { + log.Debug("update '%s' in '%s'", name, n.name) + nnode := n.Nd.Copy() + err := nnode.RemoveNodeLink(name) + if err != nil { + return err + } + nnode.AddNodeLink(name, newnode) + + if n.parent != nil { + err := n.parent.update(n.name, nnode) + if err != nil { + return err + } + } + n.Nd = nnode + return nil +} diff --git a/fuse/ipns/link_unix.go b/fuse/ipns/link_unix.go new file mode 100644 index 00000000000..e81576c6e66 --- /dev/null +++ b/fuse/ipns/link_unix.go @@ -0,0 +1,24 @@ +package ipns + +import ( + "os" + + 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
+	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
+)
+
+type Link struct {
+	Target string
+}
+
+func (l *Link) Attr() fuse.Attr {
+	log.Debug("Link attr.")
+	return fuse.Attr{
+		Mode: os.ModeSymlink | 0555,
+	}
+}
+
+func (l *Link) Readlink(req *fuse.ReadlinkRequest, intr fs.Intr) (string, fuse.Error) {
+	log.Debug("ReadLink: %s", l.Target)
+	return l.Target, nil
+}
diff --git a/fuse/ipns/mount_unix.go b/fuse/ipns/mount_unix.go
new file mode 100644
index 00000000000..7df6f140c5d
--- /dev/null
+++ b/fuse/ipns/mount_unix.go
@@ -0,0 +1,91 @@
+package ipns
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"os/signal"
+	"runtime"
+	"syscall"
+	"time"
+
+	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
+	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
+	"github.com/jbenet/go-ipfs/core"
+)
+
+// Mount mounts an IpfsNode instance at a particular path. It
+// serves until the process receives exit signals (to Unmount).
+func Mount(ipfs *core.IpfsNode, fpath string, ipfspath string) error {
+
+	sigc := make(chan os.Signal, 1)
+	signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT,
+		syscall.SIGTERM, syscall.SIGQUIT)
+
+	go func() {
+		<-sigc
+		for {
+			err := Unmount(fpath)
+			if err == nil {
+				ipfs.Network.Close()
+				return
+			}
+			time.Sleep(time.Millisecond * 100)
+		}
+	}()
+
+	c, err := fuse.Mount(fpath)
+	if err != nil {
+		return err
+	}
+	defer c.Close()
+
+	fsys, err := NewIpns(ipfs, ipfspath)
+	if err != nil {
+		return err
+	}
+
+	err = fs.Serve(c, fsys)
+	if err != nil {
+		return err
+	}
+
+	// check if the mount process has an error to report
+	<-c.Ready
+	if err := c.MountError; err != nil {
+		return err
+	}
+	return nil
+}
+
+// Unmount attempts to unmount the provided FUSE mount point, forcibly
+// if necessary.
+func Unmount(point string) error {
+	fmt.Printf("Unmounting %s...\n", point)
+
+	var cmd *exec.Cmd
+	switch runtime.GOOS {
+	case "darwin":
+		cmd = exec.Command("diskutil", "umount", "force", point)
+	case "linux":
+		cmd = exec.Command("fusermount", "-u", point)
+	default:
+		return fmt.Errorf("unmount: unimplemented")
+	}
+
+	errc := make(chan error, 1)
+	go func() {
+		if err := exec.Command("umount", point).Run(); err == nil {
+			errc <- nil
+			return
+		}
+		errc <- cmd.Run() // retry to unmount with the fallback cmd
+	}()
+
+	select {
+	case <-time.After(1 * time.Second):
+		return fmt.Errorf("umount timeout")
+	case err := <-errc:
+		return err
+	}
+}
diff --git a/fuse/ipns/repub_unix.go b/fuse/ipns/repub_unix.go
new file mode 100644
index 00000000000..4e807578d3a
--- /dev/null
+++ b/fuse/ipns/repub_unix.go
@@ -0,0 +1,42 @@
+package ipns
+
+import "time"
+
+type Republisher struct {
+	TimeoutLong  time.Duration
+	TimeoutShort time.Duration
+	Publish      chan struct{}
+	node         *Node
+}
+
+func NewRepublisher(n *Node, tshort, tlong time.Duration) *Republisher {
+	return &Republisher{
+		TimeoutShort: tshort,
+		TimeoutLong:  tlong,
+		Publish:      make(chan struct{}),
+		node:         n,
+	}
+}
+
+func (np *Republisher) Run() {
+	for _ = range np.Publish {
+		quick := time.After(np.TimeoutShort)
+		longer := time.After(np.TimeoutLong)
+
+	wait:
+		select {
+		case <-quick:
+		case <-longer:
+		case <-np.Publish:
+			quick = time.After(np.TimeoutShort)
+			goto wait
+		}
+
+		log.Info("Publishing Changes!")
+		err := np.node.republishRoot()
+		if err != nil {
+			log.Critical("republishRoot error: %s", err)
+		}
+
+	}
+}
diff --git a/fuse/ipns/writerat.go b/fuse/ipns/writerat.go
new file mode 100644
index 00000000000..c5ddf5c5c4c
--- /dev/null
+++ b/fuse/ipns/writerat.go
@@ -0,0 +1,29 @@
+package ipns
+
+import "io"
+
+type WriteAtBuf interface {
+	io.WriterAt
+	Bytes() []byte
+}
+
+type writerAt struct {
+	buf []byte
+}
+
+func NewWriterAtFromBytes(b []byte) WriteAtBuf {
+	return &writerAt{b}
+}
+
+// TODO: make
this better in the future, this is just a quick hack for now +func (wa *writerAt) WriteAt(p []byte, off int64) (int, error) { + if off+int64(len(p)) > int64(len(wa.buf)) { + wa.buf = append(wa.buf, make([]byte, (int(off)+len(p))-len(wa.buf))...) + } + copy(wa.buf[off:], p) + return len(p), nil +} + +func (wa *writerAt) Bytes() []byte { + return wa.buf +} diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index 897c90d78a9..2091495f196 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -14,13 +14,19 @@ import ( "syscall" "time" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs" core "github.com/jbenet/go-ipfs/core" mdag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" + uio "github.com/jbenet/go-ipfs/unixfs/io" u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("ipfs") + // FileSystem is the readonly Ipfs Fuse Filesystem. type FileSystem struct { Ipfs *core.IpfsNode @@ -48,7 +54,7 @@ func (*Root) Attr() fuse.Attr { // Lookup performs a lookup under this node. func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { - u.DOut("Root Lookup: '%s'\n", name) + log.Debug("Root Lookup: '%s'", name) switch name { case "mach_kernel", ".hidden", "._.": // Just quiet some log noise on OS X. @@ -66,31 +72,48 @@ func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { // ReadDir reads a particular directory. Disallowed for root. func (*Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { - u.DOut("Read Root.\n") + log.Debug("Read Root.") return nil, fuse.EPERM } // Node is the core object representing a filesystem tree node. 
type Node struct { - Ipfs *core.IpfsNode - Nd *mdag.Node - fd *mdag.DagReader + Ipfs *core.IpfsNode + Nd *mdag.Node + fd *uio.DagReader + cached *ft.PBData +} + +func (s *Node) loadData() error { + s.cached = new(ft.PBData) + return proto.Unmarshal(s.Nd.Data, s.cached) } // Attr returns the attributes of a given node. func (s *Node) Attr() fuse.Attr { - u.DOut("Node attr.\n") - if len(s.Nd.Links) > 0 { + log.Debug("Node attr.") + if s.cached == nil { + s.loadData() + } + switch s.cached.GetType() { + case ft.PBData_Directory: return fuse.Attr{Mode: os.ModeDir | 0555} + case ft.PBData_File, ft.PBData_Raw: + size, _ := s.Nd.Size() + return fuse.Attr{ + Mode: 0444, + Size: uint64(size), + Blocks: uint64(len(s.Nd.Links)), + } + default: + u.PErr("Invalid data type.") + return fuse.Attr{} } - - size, _ := s.Nd.Size() - return fuse.Attr{Mode: 0444, Size: uint64(size)} } // Lookup performs a lookup under this node. func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { - u.DOut("Lookup '%s'\n", name) + log.Debug("Lookup '%s'", name) nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name}) if err != nil { // todo: make this error more versatile. 
@@ -102,7 +125,7 @@ func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) { // ReadDir reads the link structure as directory entries func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { - u.DOut("Node ReadDir\n") + log.Debug("Node ReadDir") entries := make([]fuse.Dirent, len(s.Nd.Links)) for i, link := range s.Nd.Links { n := link.Name @@ -121,7 +144,7 @@ func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) { // ReadAll reads the object data as file data func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) { u.DOut("Read node.\n") - r, err := mdag.NewDagReader(s.Nd, s.Ipfs.DAG) + r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG) if err != nil { return nil, err } @@ -147,6 +170,7 @@ func Mount(ipfs *core.IpfsNode, fpath string) error { } time.Sleep(time.Millisecond * 10) } + ipfs.Network.Close() }() c, err := fuse.Mount(fpath) @@ -171,7 +195,7 @@ func Mount(ipfs *core.IpfsNode, fpath string) error { // Unmount attempts to unmount the provided FUSE mount point, forcibly // if necessary. 
func Unmount(point string) error { - fmt.Printf("Unmounting %s...\n", point) + log.Info("Unmounting %s...", point) var cmd *exec.Cmd switch runtime.GOOS { diff --git a/importer/rabin.go b/importer/chunk/rabin.go similarity index 73% rename from importer/rabin.go rename to importer/chunk/rabin.go index 3eab5bc9cf0..fbfb4cec41e 100644 --- a/importer/rabin.go +++ b/importer/chunk/rabin.go @@ -1,4 +1,4 @@ -package importer +package chunk import ( "bufio" @@ -92,43 +92,3 @@ func (mr *MaybeRabin) Split(r io.Reader) chan []byte { }() return out } - -/* -func WhyrusleepingCantImplementRabin(r io.Reader) chan []byte { - out := make(chan []byte, 4) - go func() { - buf := bufio.NewReader(r) - blkbuf := new(bytes.Buffer) - window := make([]byte, 16) - var val uint64 - prime := uint64(61) - - get := func(i int) uint64 { - return uint64(window[i%len(window)]) - } - - set := func(i int, val byte) { - window[i%len(window)] = val - } - - for i := 0; ; i++ { - curb, err := buf.ReadByte() - if err != nil { - break - } - set(i, curb) - blkbuf.WriteByte(curb) - - hash := md5.Sum(window) - if hash[0] == 0 && hash[1] == 0 { - out <- blkbuf.Bytes() - blkbuf.Reset() - } - } - out <- blkbuf.Bytes() - close(out) - }() - - return out -} -*/ diff --git a/importer/splitting.go b/importer/chunk/splitting.go similarity index 71% rename from importer/splitting.go rename to importer/chunk/splitting.go index 05eaec15ae5..0b5717eaf6e 100644 --- a/importer/splitting.go +++ b/importer/chunk/splitting.go @@ -1,13 +1,17 @@ -package importer +package chunk import ( "io" - u "github.com/jbenet/go-ipfs/util" + "github.com/jbenet/go-ipfs/util" ) +var log = util.Logger("chunk") + +var DefaultSplitter = &SizeSplitter{1024 * 512} + type BlockSplitter interface { - Split(io.Reader) chan []byte + Split(r io.Reader) chan []byte } type SizeSplitter struct { @@ -28,7 +32,7 @@ func (ss *SizeSplitter) Split(r io.Reader) chan []byte { } return } - u.PErr("block split error: %v\n", err) + log.Error("Block split error: 
%s", err) return } if nread < ss.Size { diff --git a/importer/importer.go b/importer/importer.go index 197eaef19fe..0a4d9848e4e 100644 --- a/importer/importer.go +++ b/importer/importer.go @@ -5,9 +5,14 @@ import ( "io" "os" + "github.com/jbenet/go-ipfs/importer/chunk" dag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" + "github.com/jbenet/go-ipfs/util" ) +var log = util.Logger("importer") + // BlockSizeLimit specifies the maximum size an imported block can have. var BlockSizeLimit = int64(1048576) // 1 MB @@ -20,22 +25,31 @@ var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded") // NewDagFromReader constructs a Merkle DAG from the given io.Reader. // size required for block construction. func NewDagFromReader(r io.Reader) (*dag.Node, error) { - return NewDagFromReaderWithSplitter(r, &SizeSplitter{1024 * 512}) + return NewDagFromReaderWithSplitter(r, chunk.DefaultSplitter) } -func NewDagFromReaderWithSplitter(r io.Reader, spl BlockSplitter) (*dag.Node, error) { +func NewDagFromReaderWithSplitter(r io.Reader, spl chunk.BlockSplitter) (*dag.Node, error) { blkChan := spl.Split(r) first := <-blkChan - root := &dag.Node{Data: dag.FilePBData(first)} + root := &dag.Node{} + mbf := new(ft.MultiBlock) for blk := range blkChan { - child := &dag.Node{Data: dag.WrapData(blk)} + mbf.AddBlockSize(uint64(len(blk))) + child := &dag.Node{Data: ft.WrapData(blk)} err := root.AddNodeLink("", child) if err != nil { return nil, err } } + mbf.Data = first + data, err := mbf.GetBytes() + if err != nil { + return nil, err + } + + root.Data = data return root, nil } diff --git a/importer/importer_test.go b/importer/importer_test.go index 9fb1afa0809..dd52f9e1fc3 100644 --- a/importer/importer_test.go +++ b/importer/importer_test.go @@ -9,9 +9,13 @@ import ( "os" "testing" - dag "github.com/jbenet/go-ipfs/merkledag" + "github.com/jbenet/go-ipfs/importer/chunk" + uio "github.com/jbenet/go-ipfs/unixfs/io" ) +// NOTE: +// These tests tests a 
combination of unixfs/io/dagreader and importer/chunk. +// Maybe split them up somehow? func TestBuildDag(t *testing.T) { td := os.TempDir() fi, err := os.Create(td + "/tmpfi") @@ -34,24 +38,30 @@ func TestBuildDag(t *testing.T) { //Test where calls to read are smaller than the chunk size func TestSizeBasedSplit(t *testing.T) { - bs := &SizeSplitter{512} + bs := &chunk.SizeSplitter{512} testFileConsistency(t, bs, 32*512) - bs = &SizeSplitter{4096} + bs = &chunk.SizeSplitter{4096} testFileConsistency(t, bs, 32*4096) // Uneven offset testFileConsistency(t, bs, 31*4095) } -func testFileConsistency(t *testing.T, bs BlockSplitter, nbytes int) { +func dup(b []byte) []byte { + o := make([]byte, len(b)) + copy(o, b) + return o +} + +func testFileConsistency(t *testing.T, bs chunk.BlockSplitter, nbytes int) { buf := new(bytes.Buffer) io.CopyN(buf, rand.Reader, int64(nbytes)) - should := buf.Bytes() + should := dup(buf.Bytes()) nd, err := NewDagFromReaderWithSplitter(buf, bs) if err != nil { t.Fatal(err) } - r, err := dag.NewDagReader(nd, nil) + r, err := uio.NewDagReader(nd, nil) if err != nil { t.Fatal(err) } @@ -80,14 +90,14 @@ func arrComp(a, b []byte) error { } func TestMaybeRabinConsistency(t *testing.T) { - testFileConsistency(t, NewMaybeRabin(4096), 256*4096) + testFileConsistency(t, chunk.NewMaybeRabin(4096), 256*4096) } func TestRabinBlockSize(t *testing.T) { buf := new(bytes.Buffer) nbytes := 1024 * 1024 io.CopyN(buf, rand.Reader, int64(nbytes)) - rab := NewMaybeRabin(4096) + rab := chunk.NewMaybeRabin(4096) blkch := rab.Split(buf) var blocks [][]byte diff --git a/merkledag/Makefile b/merkledag/Makefile index 2524ed3ba48..711f34bdac2 100644 --- a/merkledag/Makefile +++ b/merkledag/Makefile @@ -1,11 +1,8 @@ -all: node.pb.go data.pb.go +all: node.pb.go node.pb.go: node.proto protoc --gogo_out=. --proto_path=../../../../:/usr/local/opt/protobuf/include:. $< -data.pb.go: data.proto - protoc --go_out=. 
data.proto - clean: rm node.pb.go diff --git a/merkledag/coding.go b/merkledag/coding.go index 45142ac4724..1d83f32ef0f 100644 --- a/merkledag/coding.go +++ b/merkledag/coding.go @@ -3,6 +3,8 @@ package merkledag import ( "fmt" + u "github.com/jbenet/go-ipfs/util" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ) @@ -76,6 +78,7 @@ func (n *Node) Encoded(force bool) ([]byte, error) { if err != nil { return []byte{}, err } + n.cached = u.Hash(n.encoded) } return n.encoded, nil diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go index 1ec5f3c5e75..f0c93ad63d6 100644 --- a/merkledag/merkledag.go +++ b/merkledag/merkledag.go @@ -3,14 +3,14 @@ package merkledag import ( "fmt" - "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" - mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" blocks "github.com/jbenet/go-ipfs/blocks" bserv "github.com/jbenet/go-ipfs/blockservice" u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("merkledag") + // NodeMap maps u.Keys to Nodes. // We cannot use []byte/Multihash for keys :( // so have to convert Multihash bytes to string (u.Key) @@ -24,6 +24,8 @@ type Node struct { // cache encoded/marshaled value encoded []byte + + cached mh.Multihash } // Link represents an IPFS Merkle DAG Link between Nodes. @@ -41,27 +43,70 @@ type Link struct { Node *Node } +func MakeLink(n *Node) (*Link, error) { + s, err := n.Size() + if err != nil { + return nil, err + } + + h, err := n.Multihash() + if err != nil { + return nil, err + } + return &Link{ + Size: s, + Hash: h, + }, nil +} + // AddNodeLink adds a link to another node. func (n *Node) AddNodeLink(name string, that *Node) error { - s, err := that.Size() + lnk, err := MakeLink(that) if err != nil { return err } + lnk.Name = name + lnk.Node = that + + n.Links = append(n.Links, lnk) + return nil +} - h, err := that.Multihash() +// AddNodeLink adds a link to another node. 
without keeping a reference to +// the child node +func (n *Node) AddNodeLinkClean(name string, that *Node) error { + lnk, err := MakeLink(that) if err != nil { return err } + lnk.Name = name - n.Links = append(n.Links, &Link{ - Name: name, - Size: s, - Hash: h, - Node: that, - }) + n.Links = append(n.Links, lnk) return nil } +func (n *Node) RemoveNodeLink(name string) error { + for i, l := range n.Links { + if l.Name == name { + n.Links = append(n.Links[:i], n.Links[i+1:]...) + return nil + } + } + return u.ErrNotFound +} + +// Copy returns a copy of the node. +// NOTE: does not make copies of Node objects in the links. +func (n *Node) Copy() *Node { + nnode := new(Node) + nnode.Data = make([]byte, len(n.Data)) + copy(nnode.Data, n.Data) + + nnode.Links = make([]*Link, len(n.Links)) + copy(nnode.Links, n.Links) + return nnode +} + // Size returns the total size of the data addressed by node, // including the total sizes of references. func (n *Node) Size() (uint64, error) { @@ -79,12 +124,12 @@ func (n *Node) Size() (uint64, error) { // Multihash hashes the encoded data of this node. func (n *Node) Multihash() (mh.Multihash, error) { - b, err := n.Encoded(false) + _, err := n.Encoded(false) if err != nil { return nil, err } - return u.Hash(b) + return n.cached, nil } // Key returns the Multihash as a key, for maps. 
@@ -105,7 +150,7 @@ type DAGService struct { // Add adds a node to the DAGService, storing the block in the BlockService func (n *DAGService) Add(nd *Node) (u.Key, error) { k, _ := nd.Key() - u.DOut("DagService Add [%s]\n", k.Pretty()) + log.Debug("DagService Add [%s]", k) if n == nil { return "", fmt.Errorf("DAGService is nil") } @@ -115,7 +160,9 @@ func (n *DAGService) Add(nd *Node) (u.Key, error) { return "", err } - b, err := blocks.NewBlock(d) + b := new(blocks.Block) + b.Data = d + b.Multihash, err = nd.Multihash() if err != nil { return "", err } @@ -126,16 +173,16 @@ func (n *DAGService) Add(nd *Node) (u.Key, error) { func (n *DAGService) AddRecursive(nd *Node) error { _, err := n.Add(nd) if err != nil { + log.Info("AddRecursive Error: %s\n", err) return err } for _, link := range nd.Links { - if link.Node == nil { - panic("Why does this node have a nil link?\n") - } - err := n.AddRecursive(link.Node) - if err != nil { - return err + if link.Node != nil { + err := n.AddRecursive(link.Node) + if err != nil { + return err + } } } @@ -155,45 +202,3 @@ func (n *DAGService) Get(k u.Key) (*Node, error) { return Decoded(b.Data) } - -func FilePBData(data []byte) []byte { - pbfile := new(PBData) - typ := PBData_File - pbfile.Type = &typ - pbfile.Data = data - - data, err := proto.Marshal(pbfile) - if err != nil { - //this really shouldnt happen, i promise - panic(err) - } - return data -} - -func FolderPBData() []byte { - pbfile := new(PBData) - typ := PBData_Directory - pbfile.Type = &typ - - data, err := proto.Marshal(pbfile) - if err != nil { - //this really shouldnt happen, i promise - panic(err) - } - return data -} - -func WrapData(b []byte) []byte { - pbdata := new(PBData) - typ := PBData_Raw - pbdata.Data = b - pbdata.Type = &typ - - out, err := proto.Marshal(pbdata) - if err != nil { - // This shouldnt happen. seriously. 
- panic(err) - } - - return out -} diff --git a/merkledag/merkledag_test.go b/merkledag/merkledag_test.go index 7cd1649e27a..2db166beb3e 100644 --- a/merkledag/merkledag_test.go +++ b/merkledag/merkledag_test.go @@ -2,8 +2,9 @@ package merkledag import ( "fmt" - u "github.com/jbenet/go-ipfs/util" "testing" + + u "github.com/jbenet/go-ipfs/util" ) func TestNode(t *testing.T) { diff --git a/namesys/dns.go b/namesys/dns.go new file mode 100644 index 00000000000..66448511f85 --- /dev/null +++ b/namesys/dns.go @@ -0,0 +1,48 @@ +package namesys + +import ( + "net" + + b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" + isd "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-is-domain" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + + u "github.com/jbenet/go-ipfs/util" +) + +// DNSResolver implements a Resolver on DNS domains +type DNSResolver struct { + // TODO: maybe some sort of caching? + // cache would need a timeout +} + +// CanResolve implements Resolver +func (r *DNSResolver) CanResolve(name string) bool { + return isd.IsDomain(name) +} + +// Resolve implements Resolver +// TXT records for a given domain name should contain a b58 +// encoded multihash. +func (r *DNSResolver) Resolve(name string) (string, error) { + log.Info("DNSResolver resolving %v", name) + txt, err := net.LookupTXT(name) + if err != nil { + return "", err + } + + for _, t := range txt { + chk := b58.Decode(t) + if len(chk) == 0 { + continue + } + + _, err := mh.Cast(chk) + if err != nil { + continue + } + return t, nil + } + + return "", u.ErrNotFound +} diff --git a/namesys/entry.pb.go b/namesys/entry.pb.go new file mode 100644 index 00000000000..d9dc5160b4f --- /dev/null +++ b/namesys/entry.pb.go @@ -0,0 +1,48 @@ +// Code generated by protoc-gen-go. +// source: entry.proto +// DO NOT EDIT! + +/* +Package namesys is a generated protocol buffer package. 
+ +It is generated from these files: + entry.proto + +It has these top-level messages: + IpnsEntry +*/ +package namesys + +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type IpnsEntry struct { + Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + Signature []byte `protobuf:"bytes,2,req,name=signature" json:"signature,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IpnsEntry) Reset() { *m = IpnsEntry{} } +func (m *IpnsEntry) String() string { return proto.CompactTextString(m) } +func (*IpnsEntry) ProtoMessage() {} + +func (m *IpnsEntry) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *IpnsEntry) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +func init() { +} diff --git a/namesys/entry.proto b/namesys/entry.proto new file mode 100644 index 00000000000..fee830d7ebb --- /dev/null +++ b/namesys/entry.proto @@ -0,0 +1,6 @@ +package namesys; + +message IpnsEntry { + required bytes value = 1; + required bytes signature = 2; +} diff --git a/namesys/interface.go b/namesys/interface.go new file mode 100644 index 00000000000..eef1fc32b24 --- /dev/null +++ b/namesys/interface.go @@ -0,0 +1,43 @@ +package namesys + +import ( + "errors" + + ci "github.com/jbenet/go-ipfs/crypto" +) + +// ErrResolveFailed signals an error when attempting to resolve. +var ErrResolveFailed = errors.New("could not resolve name.") + +// ErrPublishFailed signals an error when attempting to publish. +var ErrPublishFailed = errors.New("could not publish name.") + +// Namesys represents a cohesive name publishing and resolving system. +// +// Publishing a name is the process of establishing a mapping, a key-value +// pair, according to naming rules and databases. 
+// +// Resolving a name is the process of looking up the value associated with the +// key (name). +type NameSystem interface { + Resolver + Publisher +} + +// Resolver is an object capable of resolving names. +type Resolver interface { + + // Resolve looks up a name, and returns the value previously published. + Resolve(name string) (value string, err error) + + // CanResolve checks whether this Resolver can resolve a name + CanResolve(name string) bool +} + +// Publisher is an object capable of publishing particular names. +type Publisher interface { + + // Publish establishes a name-value mapping. + // TODO make this not PrivKey specific. + Publish(name ci.PrivKey, value string) error +} diff --git a/namesys/namesys.go b/namesys/namesys.go new file mode 100644 index 00000000000..2ea9a30bdfc --- /dev/null +++ b/namesys/namesys.go @@ -0,0 +1,57 @@ +package namesys + +import ( + ci "github.com/jbenet/go-ipfs/crypto" + routing "github.com/jbenet/go-ipfs/routing" +) + +// ipnsNameSystem implements IPNS naming. +// +// Uses three Resolvers: +// (a) ipfs routing naming: SFS-like PKI names. +// (b) dns domains: resolves using links in DNS TXT records +// (c) proquints: interprets string as the raw byte data. +// +// It can only publish to: (a) ipfs routing naming. 
+// +type ipns struct { + resolvers []Resolver + publisher Publisher +} + +// NewNameSystem will construct the IPFS naming system based on Routing +func NewNameSystem(r routing.IpfsRouting) NameSystem { + return &ipns{ + resolvers: []Resolver{ + new(DNSResolver), + new(ProquintResolver), + NewRoutingResolver(r), + }, + publisher: NewRoutingPublisher(r), + } +} + +// Resolve implements Resolver +func (ns *ipns) Resolve(name string) (string, error) { + for _, r := range ns.resolvers { + if r.CanResolve(name) { + return r.Resolve(name) + } + } + return "", ErrResolveFailed +} + +// CanResolve implements Resolver +func (ns *ipns) CanResolve(name string) bool { + for _, r := range ns.resolvers { + if r.CanResolve(name) { + return true + } + } + return false +} + +// Publish implements Publisher +func (ns *ipns) Publish(name ci.PrivKey, value string) error { + return ns.publisher.Publish(name, value) +} diff --git a/namesys/proquint.go b/namesys/proquint.go new file mode 100644 index 00000000000..89bbc4a44c4 --- /dev/null +++ b/namesys/proquint.go @@ -0,0 +1,24 @@ +package namesys + +import ( + "errors" + + proquint "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/bren2010/proquint" +) + +type ProquintResolver struct{} + +// CanResolve implements Resolver. Checks whether the name is a proquint string. +func (r *ProquintResolver) CanResolve(name string) bool { + ok, err := proquint.IsProquint(name) + return err == nil && ok +} + +// Resolve implements Resolver. Decodes the proquint string. 
+func (r *ProquintResolver) Resolve(name string) (string, error) { + ok := r.CanResolve(name) + if !ok { + return "", errors.New("not a valid proquint string") + } + return string(proquint.Decode(name)), nil +} diff --git a/namesys/publisher.go b/namesys/publisher.go new file mode 100644 index 00000000000..88533f8a089 --- /dev/null +++ b/namesys/publisher.go @@ -0,0 +1,78 @@ +package namesys + +import ( + "fmt" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + + ci "github.com/jbenet/go-ipfs/crypto" + routing "github.com/jbenet/go-ipfs/routing" + u "github.com/jbenet/go-ipfs/util" +) + +// ipnsPublisher is capable of publishing and resolving names to the IPFS +// routing system. +type ipnsPublisher struct { + routing routing.IpfsRouting +} + +// NewRoutingPublisher constructs a publisher for the IPFS Routing name system. +func NewRoutingPublisher(route routing.IpfsRouting) Publisher { + return &ipnsPublisher{routing: route} +} + +// Publish implements Publisher. Accepts a keypair and a value, +func (p *ipnsPublisher) Publish(k ci.PrivKey, value string) error { + log.Debug("namesys: Publish %s", value) + + // validate `value` is a ref (multihash) + _, err := mh.FromB58String(value) + if err != nil { + return fmt.Errorf("publish value must be str multihash. 
%v", err)
+	}
+
+	ctx := context.TODO()
+	data, err := createRoutingEntryData(k, value)
+	if err != nil {
+		return err
+	}
+	pubkey := k.GetPublic()
+	pkbytes, err := pubkey.Bytes()
+	if err != nil {
+		return err
+	}
+
+	nameb := u.Hash(pkbytes)
+	namekey := u.Key(nameb).Pretty()
+	ipnskey := u.Hash([]byte("/ipns/" + namekey))
+
+	// Store associated public key
+	timectx, _ := context.WithDeadline(ctx, time.Now().Add(time.Second*4))
+	err = p.routing.PutValue(timectx, u.Key(nameb), pkbytes)
+	if err != nil {
+		return err
+	}
+
+	// Store ipns entry at h("/ipns/"+b58(h(pubkey)))
+	timectx, _ = context.WithDeadline(ctx, time.Now().Add(time.Second*4))
+	err = p.routing.PutValue(timectx, u.Key(ipnskey), data)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func createRoutingEntryData(pk ci.PrivKey, val string) ([]byte, error) {
+	entry := new(IpnsEntry)
+	sig, err := pk.Sign([]byte(val))
+	if err != nil {
+		return nil, err
+	}
+	entry.Signature = sig
+	entry.Value = []byte(val)
+	return proto.Marshal(entry)
+}
diff --git a/namesys/resolve_test.go b/namesys/resolve_test.go
new file mode 100644
index 00000000000..5e652f42f8d
--- /dev/null
+++ b/namesys/resolve_test.go
@@ -0,0 +1,53 @@
+package namesys
+
+import (
+	"testing"
+
+	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
+	ci "github.com/jbenet/go-ipfs/crypto"
+	"github.com/jbenet/go-ipfs/peer"
+	mock "github.com/jbenet/go-ipfs/routing/mock"
+	u "github.com/jbenet/go-ipfs/util"
+)
+
+func TestRoutingResolve(t *testing.T) {
+	local := &peer.Peer{
+		ID: []byte("testID"),
+	}
+	lds := ds.NewMapDatastore()
+	d := mock.NewMockRouter(local, lds)
+
+	resolver := NewRoutingResolver(d)
+	publisher := NewRoutingPublisher(d)
+
+	privk, pubk, err := ci.GenerateKeyPair(ci.RSA, 512)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = publisher.Publish(privk, "Hello")
+	if err == nil {
+		t.Fatal("should have errored out when publishing a non-multihash val")
+	}
+
+	h := 
u.Key(u.Hash([]byte("Hello"))).Pretty() + err = publisher.Publish(privk, h) + if err != nil { + t.Fatal(err) + } + + pubkb, err := pubk.Bytes() + if err != nil { + t.Fatal(err) + } + + pkhash := u.Hash(pubkb) + res, err := resolver.Resolve(u.Key(pkhash).Pretty()) + if err != nil { + t.Fatal(err) + } + + if res != h { + t.Fatal("Got back incorrect value.") + } +} diff --git a/namesys/routing.go b/namesys/routing.go new file mode 100644 index 00000000000..da1c05d0e81 --- /dev/null +++ b/namesys/routing.go @@ -0,0 +1,85 @@ +package namesys + +import ( + "fmt" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + ci "github.com/jbenet/go-ipfs/crypto" + routing "github.com/jbenet/go-ipfs/routing" + u "github.com/jbenet/go-ipfs/util" +) + +var log = u.Logger("namesys") + +// routingResolver implements NSResolver for the main IPFS SFS-like naming +type routingResolver struct { + routing routing.IpfsRouting +} + +// NewRoutingResolver constructs a name resolver using the IPFS Routing system +// to implement SFS-like naming on top. +func NewRoutingResolver(route routing.IpfsRouting) Resolver { + return &routingResolver{routing: route} +} + +// CanResolve implements Resolver. Checks whether name is a b58 encoded string. +func (r *routingResolver) CanResolve(name string) bool { + _, err := mh.FromB58String(name) + return err == nil +} + +// Resolve implements Resolver. Uses the IPFS routing system to resolve SFS-like +// names. +func (r *routingResolver) Resolve(name string) (string, error) { + log.Debug("RoutingResolve: '%s'", name) + ctx := context.TODO() + hash, err := mh.FromB58String(name) + if err != nil { + log.Warning("RoutingResolve: bad input hash: [%s]\n", name) + return "", err + } + // name should be a multihash. if it isn't, error out here. 
+ + // use the routing system to get the name. + // /ipns/ + h := u.Hash([]byte("/ipns/" + name)) + + ipnsKey := u.Key(h) + val, err := r.routing.GetValue(ctx, ipnsKey) + if err != nil { + log.Warning("RoutingResolve get failed.") + return "", err + } + + entry := new(IpnsEntry) + err = proto.Unmarshal(val, entry) + if err != nil { + return "", err + } + + // name should be a public key retrievable from ipfs + // /ipfs/ + key := u.Key(hash) + pkval, err := r.routing.GetValue(ctx, key) + if err != nil { + log.Warning("RoutingResolve PubKey Get failed.") + return "", err + } + + // get PublicKey from node.Data + pk, err := ci.UnmarshalPublicKey(pkval) + if err != nil { + return "", err + } + + // check sig with pk + if ok, err := pk.Verify(entry.GetValue(), entry.GetSignature()); err != nil || !ok { + return "", fmt.Errorf("Invalid value. Not signed by PrivateKey corresponding to %v", pk) + } + + // ok sig checks out. this is a valid name. + return string(entry.GetValue()), nil +} diff --git a/net/conn/conn.go b/net/conn/conn.go index 645264b8da4..25ecc44d684 100644 --- a/net/conn/conn.go +++ b/net/conn/conn.go @@ -21,7 +21,7 @@ const MaxMessageSize = 1 << 20 // Conn represents a connection to another Peer (IPFS Node). 
type Conn struct { Peer *peer.Peer - Addr *ma.Multiaddr + Addr ma.Multiaddr Conn net.Conn Closed chan bool @@ -34,7 +34,7 @@ type Conn struct { type Map map[u.Key]*Conn // NewConn constructs a new connection -func NewConn(peer *peer.Peer, addr *ma.Multiaddr, nconn net.Conn) (*Conn, error) { +func NewConn(peer *peer.Peer, addr ma.Multiaddr, nconn net.Conn) (*Conn, error) { conn := &Conn{ Peer: peer, Addr: addr, @@ -56,7 +56,7 @@ func Dial(network string, peer *peer.Peer) (*Conn, error) { return nil, fmt.Errorf("No address for network %s", network) } - network, host, err := addr.DialArgs() + network, host, err := ma.DialArgs(addr) if err != nil { return nil, err } @@ -104,6 +104,6 @@ func (c *Conn) Close() error { // NetConnMultiaddr returns the net.Conn's address, recast as a multiaddr. // (consider moving this directly into the multiaddr package) -func NetConnMultiaddr(nconn net.Conn) (*ma.Multiaddr, error) { +func NetConnMultiaddr(nconn net.Conn) (ma.Multiaddr, error) { return ma.FromNetAddr(nconn.RemoteAddr()) } diff --git a/net/service/service.go b/net/service/service.go index f3d4ba5aa5d..db7cb82a832 100644 --- a/net/service/service.go +++ b/net/service/service.go @@ -82,7 +82,7 @@ func (s *Service) sendMessage(ctx context.Context, m msg.NetMessage, rid Request return err } - // u.DOut("Service send message [to = %s]\n", m.Peer().ID.Pretty()) + // u.DOut("Service send message [to = %s]\n", m.Peer()) // send message m2 := msg.New(m.Peer(), data) diff --git a/net/swarm/conn.go b/net/swarm/conn.go index 0713ccf0b8d..e4b980834a9 100644 --- a/net/swarm/conn.go +++ b/net/swarm/conn.go @@ -37,8 +37,8 @@ func (s *Swarm) listen() error { } // Listen for new connections on the given multiaddr -func (s *Swarm) connListen(maddr *ma.Multiaddr) error { - netstr, addr, err := maddr.DialArgs() +func (s *Swarm) connListen(maddr ma.Multiaddr) error { + netstr, addr, err := ma.DialArgs(maddr) if err != nil { return err } @@ -106,7 +106,7 @@ func (s *Swarm) connSetup(c *conn.Conn) 
error { } if c.Peer != nil { - u.DOut("Starting connection: %s\n", c.Peer.Key().Pretty()) + u.DOut("Starting connection: %s\n", c.Peer) } else { u.DOut("Starting connection: [unknown peer]\n") } @@ -115,7 +115,7 @@ func (s *Swarm) connSetup(c *conn.Conn) error { return fmt.Errorf("Conn securing error: %v", err) } - u.DOut("Secured connection: %s\n", c.Peer.Key().Pretty()) + u.DOut("Secured connection: %s\n", c.Peer) // add address of connection to Peer. Maybe it should happen in connSecure. c.Peer.AddAddress(c.Addr) @@ -184,8 +184,7 @@ func (s *Swarm) fanOut() { continue } - // u.DOut("[peer: %s] Sent message [to = %s]\n", - // s.local.ID.Pretty(), msg.Peer().ID.Pretty()) + // u.DOut("[peer: %s] Sent message [to = %s]\n", s.local, msg.Peer()) // queue it in the connection's buffer conn.Secure.Out <- msg.Data() @@ -208,13 +207,12 @@ func (s *Swarm) fanIn(c *conn.Conn) { case data, ok := <-c.Secure.In: if !ok { - e := fmt.Errorf("Error retrieving from conn: %v", c.Peer.Key().Pretty()) + e := fmt.Errorf("Error retrieving from conn: %v", c.Peer) s.errChan <- e goto out } - // u.DOut("[peer: %s] Received message [from = %s]\n", - // s.local.ID.Pretty(), c.Peer.ID.Pretty()) + // u.DOut("[peer: %s] Received message [from = %s]\n", s.local, c.Peer) msg := msg.New(c.Peer, data) s.Incoming <- msg diff --git a/net/swarm/swarm.go b/net/swarm/swarm.go index df84e5a94ce..9941c708608 100644 --- a/net/swarm/swarm.go +++ b/net/swarm/swarm.go @@ -143,7 +143,7 @@ func (s *Swarm) Dial(peer *peer.Peer) (*conn.Conn, error) { // DialAddr is for connecting to a peer when you know their addr but not their ID. // Should only be used when sure that not connected to peer in question // TODO(jbenet) merge with Dial? need way to patch back. 
-func (s *Swarm) DialAddr(addr *ma.Multiaddr) (*conn.Conn, error) { +func (s *Swarm) DialAddr(addr ma.Multiaddr) (*conn.Conn, error) { if addr == nil { return nil, errors.New("addr must be a non-nil Multiaddr") } diff --git a/net/swarm/swarm_test.go b/net/swarm/swarm_test.go index b2747481c9e..7d7c138025c 100644 --- a/net/swarm/swarm_test.go +++ b/net/swarm/swarm_test.go @@ -95,7 +95,7 @@ func TestSwarm(t *testing.T) { if a == nil { t.Fatal("error setting up peer (addr is nil)", peer) } - n, h, err := a.DialArgs() + n, h, err := ma.DialArgs(a) if err != nil { t.Fatal("error getting dial args from addr") } diff --git a/path/path.go b/path/path.go index a06fb98cb29..23920314075 100644 --- a/path/path.go +++ b/path/path.go @@ -10,6 +10,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("path") + // Resolver provides path resolution to IPFS // It has a pointer to a DAGService, which is uses to resolve nodes. type Resolver struct { @@ -20,7 +22,7 @@ type Resolver struct { // path component as a hash (key) of the first node, then resolves // all other components walking the links, with ResolveLinks. 
func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) { - u.DOut("Resolve: '%s'\n", fpath) + log.Debug("Resolve: '%s'", fpath) fpath = path.Clean(fpath) parts := strings.Split(fpath, "/") @@ -38,6 +40,7 @@ func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) { // first element in the path is a b58 hash (for now) h, err := mh.FromB58String(parts[0]) if err != nil { + u.DOut("given path element is not a base58 string.\n") return nil, err } @@ -65,10 +68,12 @@ func (s *Resolver) ResolveLinks(ndd *merkledag.Node, names []string) ( for _, name := range names { var next u.Key + var nlink *merkledag.Link // for each of the links in nd, the current object for _, link := range nd.Links { if link.Name == name { next = u.Key(link.Hash) + nlink = link break } } @@ -79,10 +84,15 @@ func (s *Resolver) ResolveLinks(ndd *merkledag.Node, names []string) ( return nil, fmt.Errorf("no link named %q under %s", name, h2) } - // fetch object for link and assign to nd - nd, err = s.DAG.Get(next) - if err != nil { - return nd, err + if nlink.Node == nil { + // fetch object for link and assign to nd + nd, err = s.DAG.Get(next) + if err != nil { + return nd, err + } + nlink.Node = nd + } else { + nd = nlink.Node } } return diff --git a/peer/peer.go b/peer/peer.go index 69d73c2d4af..ab071ea52dc 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -16,6 +16,11 @@ import ( // ID is a byte slice representing the identity of a peer. type ID mh.Multihash +// String is utililty function for printing out peer ID strings. +func (id ID) String() string { + return id.Pretty() +} + // Equal is utililty function for comparing two peer ID's func (id ID) Equal(other ID) bool { return bytes.Equal(id, other) @@ -38,7 +43,7 @@ type Map map[u.Key]*Peer // ID, and relevant Addresses. type Peer struct { ID ID - Addresses []*ma.Multiaddr + Addresses []ma.Multiaddr PrivKey ic.PrivKey PubKey ic.PubKey @@ -48,13 +53,18 @@ type Peer struct { sync.RWMutex } +// String prints out the peer. 
+func (p *Peer) String() string { + return "[Peer " + p.ID.String() + "]" +} + // Key returns the ID as a Key (string) for maps. func (p *Peer) Key() u.Key { return u.Key(p.ID) } // AddAddress adds the given Multiaddr address to Peer's addresses. -func (p *Peer) AddAddress(a *ma.Multiaddr) { +func (p *Peer) AddAddress(a ma.Multiaddr) { p.Lock() defer p.Unlock() @@ -67,17 +77,12 @@ func (p *Peer) AddAddress(a *ma.Multiaddr) { } // NetAddress returns the first Multiaddr found for a given network. -func (p *Peer) NetAddress(n string) *ma.Multiaddr { +func (p *Peer) NetAddress(n string) ma.Multiaddr { p.RLock() defer p.RUnlock() for _, a := range p.Addresses { - ps, err := a.Protocols() - if err != nil { - continue // invalid addr - } - - for _, p := range ps { + for _, p := range a.Protocols() { if p.Name == n { return a } diff --git a/peer/peerstore.go b/peer/peerstore.go index 9c0f28df316..e8eb0eac00f 100644 --- a/peer/peerstore.go +++ b/peer/peerstore.go @@ -37,7 +37,7 @@ func (p *peerstore) Get(i ID) (*Peer, error) { p.RLock() defer p.RUnlock() - k := ds.NewKey(string(i)) + k := u.Key(i).DsKey() val, err := p.peers.Get(k) if err != nil { return nil, err @@ -54,7 +54,7 @@ func (p *peerstore) Put(peer *Peer) error { p.Lock() defer p.Unlock() - k := ds.NewKey(string(peer.ID)) + k := u.Key(peer.ID).DsKey() return p.peers.Put(k, peer) } @@ -62,7 +62,7 @@ func (p *peerstore) Delete(i ID) error { p.Lock() defer p.Unlock() - k := ds.NewKey(string(i)) + k := u.Key(i).DsKey() return p.peers.Delete(k) } @@ -84,7 +84,7 @@ func (p *peerstore) All() (*Map, error) { pval, ok := val.(*Peer) if ok { - (*ps)[u.Key(k.String())] = pval + (*ps)[u.Key(pval.ID)] = pval } } return ps, nil diff --git a/peer/queue/queue_test.go b/peer/queue/queue_test.go index 8a7d22189fc..a4812befab8 100644 --- a/peer/queue/queue_test.go +++ b/peer/queue/queue_test.go @@ -68,7 +68,7 @@ func TestQueue(t *testing.T) { func newPeerTime(t time.Time) *peer.Peer { s := fmt.Sprintf("hmmm time: %v", t) - h, _ := 
u.Hash([]byte(s)) + h := u.Hash([]byte(s)) return &peer.Peer{ID: peer.ID(h)} } diff --git a/routing/dht/Message.go b/routing/dht/Message.go index 1be9a3b801e..84d323c37b9 100644 --- a/routing/dht/Message.go +++ b/routing/dht/Message.go @@ -20,11 +20,7 @@ func peerToPBPeer(p *peer.Peer) *Message_Peer { if len(p.Addresses) == 0 || p.Addresses[0] == nil { pbp.Addr = proto.String("") } else { - addr, err := p.Addresses[0].String() - if err != nil { - //Temp: what situations could cause this? - panic(err) - } + addr := p.Addresses[0].String() pbp.Addr = &addr } pid := string(p.ID) diff --git a/routing/dht/dht.go b/routing/dht/dht.go index 8ebecd5bd88..c95e0751136 100644 --- a/routing/dht/dht.go +++ b/routing/dht/dht.go @@ -21,6 +21,8 @@ import ( "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ) +var log = u.Logger("dht") + // TODO. SEE https://github.com/jbenet/node-ipfs/blob/master/submodules/ipfs-dht/index.js // IpfsDHT is an implementation of Kademlia with Coral and S/Kademlia modifications. 
@@ -65,8 +67,8 @@ func NewDHT(p *peer.Peer, ps peer.Peerstore, net inet.Network, sender inet.Sende dht.providers = NewProviderManager(p.ID) dht.routingTables = make([]*kb.RoutingTable, 3) - dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*30) - dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*100) + dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*1000) + dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*1000) dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Hour) dht.birth = time.Now() return dht @@ -74,7 +76,7 @@ func NewDHT(p *peer.Peer, ps peer.Peerstore, net inet.Network, sender inet.Sende // Connect to a new peer at the given address, ping and add to the routing table func (dht *IpfsDHT) Connect(ctx context.Context, npeer *peer.Peer) (*peer.Peer, error) { - u.DOut("Connect to new peer: %s\n", npeer.ID.Pretty()) + log.Debug("Connect to new peer: %s\n", npeer) // TODO(jbenet,whyrusleeping) // @@ -129,9 +131,8 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N dht.Update(mPeer) // Print out diagnostic - u.DOut("[peer: %s] Got message type: '%s' [from = %s]\n", - dht.self.ID.Pretty(), - Message_MessageType_name[int32(pmes.GetType())], mPeer.ID.Pretty()) + log.Debug("[peer: %s] Got message type: '%s' [from = %s]\n", + dht.self, Message_MessageType_name[int32(pmes.GetType())], mPeer) // get handler for this msg type. 
handler := dht.handlerForMsgType(pmes.GetType()) @@ -174,9 +175,8 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p *peer.Peer, pmes *Message start := time.Now() // Print out diagnostic - u.DOut("[peer: %s] Sent message type: '%s' [to = %s]\n", - dht.self.ID.Pretty(), - Message_MessageType_name[int32(pmes.GetType())], p.ID.Pretty()) + log.Debug("Sent message type: '%s' [to = %s]", + Message_MessageType_name[int32(pmes.GetType())], p) rmes, err := dht.sender.SendRequest(ctx, mes) if err != nil { @@ -221,7 +221,7 @@ func (dht *IpfsDHT) putProvider(ctx context.Context, p *peer.Peer, key string) e return err } - u.DOut("[%s] putProvider: %s for %s\n", dht.self.ID.Pretty(), p.ID.Pretty(), key) + log.Debug("%s putProvider: %s for %s", dht.self, p, key) if *rpmes.Key != *pmes.Key { return errors.New("provider not added correctly") } @@ -237,10 +237,10 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer, return nil, nil, err } - u.DOut("pmes.GetValue() %v\n", pmes.GetValue()) + log.Debug("pmes.GetValue() %v", pmes.GetValue()) if value := pmes.GetValue(); value != nil { // Success! 
We were given the value - u.DOut("getValueOrPeers: got value\n") + log.Debug("getValueOrPeers: got value") return value, nil, nil } @@ -250,7 +250,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer, if err != nil { return nil, nil, err } - u.DOut("getValueOrPeers: get from providers\n") + log.Debug("getValueOrPeers: get from providers") return val, nil, nil } @@ -263,7 +263,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer, addr, err := ma.NewMultiaddr(pb.GetAddr()) if err != nil { - u.PErr("%v\n", err.Error()) + log.Error("%v", err.Error()) continue } @@ -278,11 +278,11 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer, } if len(peers) > 0 { - u.DOut("getValueOrPeers: peers\n") + log.Debug("getValueOrPeers: peers") return nil, peers, nil } - u.DOut("getValueOrPeers: u.ErrNotFound\n") + log.Warning("getValueOrPeers: u.ErrNotFound") return nil, nil, u.ErrNotFound } @@ -304,13 +304,13 @@ func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key, for _, pinfo := range peerlist { p, err := dht.ensureConnectedToPeer(pinfo) if err != nil { - u.DErr("getFromPeers error: %s\n", err) + log.Error("getFromPeers error: %s", err) continue } pmes, err := dht.getValueSingle(ctx, p, key, level) if err != nil { - u.DErr("getFromPeers error: %s\n", err) + log.Error("getFromPeers error: %s\n", err) continue } @@ -326,7 +326,7 @@ func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key, func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) { dht.dslock.Lock() defer dht.dslock.Unlock() - v, err := dht.datastore.Get(ds.NewKey(string(key))) + v, err := dht.datastore.Get(key.DsKey()) if err != nil { return nil, err } @@ -339,12 +339,13 @@ func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) { } func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error { - return dht.datastore.Put(ds.NewKey(string(key)), value) + return dht.datastore.Put(key.DsKey(), value) } // Update signals to all 
routingTables to Update their last-seen status // on the given peer. func (dht *IpfsDHT) Update(p *peer.Peer) { + log.Debug("updating peer: %s latency = %f\n", p, p.GetLatency().Seconds()) removedCount := 0 for _, route := range dht.routingTables { removed := route.Update(p) @@ -396,11 +397,11 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []*peer.Peer for _, prov := range peers { p, err := dht.peerFromInfo(prov) if err != nil { - u.PErr("error getting peer from info: %v\n", err) + log.Error("error getting peer from info: %v", err) continue } - u.DOut("[%s] adding provider: %s for %s", dht.self.ID.Pretty(), p, key) + log.Debug("%s adding provider: %s for %s", dht.self, p, key) // Dont add outselves to the list if p.ID.Equal(dht.self.ID) { @@ -435,7 +436,7 @@ func (dht *IpfsDHT) betterPeerToQuery(pmes *Message) *peer.Peer { // == to self? nil if closer.ID.Equal(dht.self.ID) { - u.DOut("Attempted to return self! this shouldnt happen...\n") + log.Error("Attempted to return self! this shouldnt happen...") return nil } @@ -491,13 +492,19 @@ func (dht *IpfsDHT) ensureConnectedToPeer(pbp *Message_Peer) (*peer.Peer, error) return p, err } +//TODO: this should be smarter about which keys it selects. 
func (dht *IpfsDHT) loadProvidableKeys() error { kl, err := dht.datastore.KeyList() if err != nil { return err } - for _, k := range kl { - dht.providers.AddProvider(u.Key(k.Bytes()), dht.self) + for _, dsk := range kl { + k := u.KeyFromDsKey(dsk) + if len(k) == 0 { + log.Error("loadProvidableKeys error: %v", dsk) + } + + dht.providers.AddProvider(k, dht.self) } return nil } diff --git a/routing/dht/dht_test.go b/routing/dht/dht_test.go index 1bbc62cdc0f..23bdb88e72b 100644 --- a/routing/dht/dht_test.go +++ b/routing/dht/dht_test.go @@ -43,8 +43,8 @@ func setupDHT(t *testing.T, p *peer.Peer) *IpfsDHT { return d } -func setupDHTS(n int, t *testing.T) ([]*ma.Multiaddr, []*peer.Peer, []*IpfsDHT) { - var addrs []*ma.Multiaddr +func setupDHTS(n int, t *testing.T) ([]ma.Multiaddr, []*peer.Peer, []*IpfsDHT) { + var addrs []ma.Multiaddr for i := 0; i < n; i++ { a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i)) if err != nil { @@ -67,7 +67,7 @@ func setupDHTS(n int, t *testing.T) ([]*ma.Multiaddr, []*peer.Peer, []*IpfsDHT) return addrs, peers, dhts } -func makePeer(addr *ma.Multiaddr) *peer.Peer { +func makePeer(addr ma.Multiaddr) *peer.Peer { p := new(peer.Peer) p.AddAddress(addr) sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512) @@ -287,7 +287,7 @@ func TestProvidesAsync(t *testing.T) { select { case p := <-provs: if !p.ID.Equal(dhts[3].self.ID) { - t.Fatalf("got a provider, but not the right one. %v", p.ID.Pretty()) + t.Fatalf("got a provider, but not the right one. 
%s", p) } case <-ctx.Done(): t.Fatal("Didnt get back providers") diff --git a/routing/dht/ext_test.go b/routing/dht/ext_test.go index f8b9293a870..df8f26ff3d7 100644 --- a/routing/dht/ext_test.go +++ b/routing/dht/ext_test.go @@ -184,7 +184,7 @@ func TestGetFailures(t *testing.T) { func _randPeer() *peer.Peer { p := new(peer.Peer) p.ID = make(peer.ID, 16) - p.Addresses = []*ma.Multiaddr{nil} + p.Addresses = []ma.Multiaddr{nil} crand.Read(p.ID) return p } diff --git a/routing/dht/handlers.go b/routing/dht/handlers.go index 4301d1e4e97..417dd0918f1 100644 --- a/routing/dht/handlers.go +++ b/routing/dht/handlers.go @@ -38,7 +38,7 @@ func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler { } func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error) { - u.DOut("[%s] handleGetValue for key: %s\n", dht.self.ID.Pretty(), pmes.GetKey()) + log.Debug("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey()) // setup response resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel()) @@ -50,10 +50,10 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error } // let's first check if we have the value locally. - u.DOut("[%s] handleGetValue looking into ds\n", dht.self.ID.Pretty()) - dskey := ds.NewKey(pmes.GetKey()) + log.Debug("%s handleGetValue looking into ds\n", dht.self) + dskey := u.Key(pmes.GetKey()).DsKey() iVal, err := dht.datastore.Get(dskey) - u.DOut("[%s] handleGetValue looking into ds GOT %v\n", dht.self.ID.Pretty(), iVal) + log.Debug("%s handleGetValue looking into ds GOT %v\n", dht.self, iVal) // if we got an unexpected error, bail. 
if err != nil && err != ds.ErrNotFound { @@ -65,7 +65,7 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error // if we have the value, send it back if err == nil { - u.DOut("[%s] handleGetValue success!\n", dht.self.ID.Pretty()) + log.Debug("%s handleGetValue success!\n", dht.self) byts, ok := iVal.([]byte) if !ok { @@ -78,14 +78,14 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error // if we know any providers for the requested value, return those. provs := dht.providers.GetProviders(u.Key(pmes.GetKey())) if len(provs) > 0 { - u.DOut("handleGetValue returning %d provider[s]\n", len(provs)) + log.Debug("handleGetValue returning %d provider[s]\n", len(provs)) resp.ProviderPeers = peersToPBPeers(provs) } // Find closest peer on given cluster to desired key and reply with that info closer := dht.betterPeerToQuery(pmes) if closer != nil { - u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty()) + log.Debug("handleGetValue returning a closer peer: '%s'\n", closer) resp.CloserPeers = peersToPBPeers([]*peer.Peer{closer}) } @@ -96,14 +96,14 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *Message) (*Message, error) { dht.dslock.Lock() defer dht.dslock.Unlock() - dskey := ds.NewKey(pmes.GetKey()) + dskey := u.Key(pmes.GetKey()).DsKey() err := dht.datastore.Put(dskey, pmes.GetValue()) - u.DOut("[%s] handlePutValue %v %v\n", dht.self.ID.Pretty(), dskey, pmes.GetValue()) + log.Debug("%s handlePutValue %v %v\n", dht.self, dskey, pmes.GetValue()) return pmes, err } func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *Message) (*Message, error) { - u.DOut("[%s] Responding to ping from [%s]!\n", dht.self.ID.Pretty(), p.ID.Pretty()) + log.Debug("%s Responding to ping from %s!\n", dht.self, p) return pmes, nil } @@ -119,16 +119,16 @@ func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *Message) (*Message, error 
} if closest == nil { - u.PErr("handleFindPeer: could not find anything.\n") + log.Error("handleFindPeer: could not find anything.\n") return resp, nil } if len(closest.Addresses) == 0 { - u.PErr("handleFindPeer: no addresses for connected peer...\n") + log.Error("handleFindPeer: no addresses for connected peer...\n") return resp, nil } - u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty()) + log.Debug("handleFindPeer: sending back '%s'\n", closest) resp.CloserPeers = peersToPBPeers([]*peer.Peer{closest}) return resp, nil } @@ -137,9 +137,11 @@ func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *Message) (*Message, e resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel()) // check if we have this value, to add ourselves as provider. - has, err := dht.datastore.Has(ds.NewKey(pmes.GetKey())) + log.Debug("handling GetProviders: '%s'", pmes.GetKey()) + dsk := u.Key(pmes.GetKey()).DsKey() + has, err := dht.datastore.Has(dsk) if err != nil && err != ds.ErrNotFound { - u.PErr("unexpected datastore error: %v\n", err) + log.Error("unexpected datastore error: %v\n", err) has = false } @@ -171,8 +173,7 @@ type providerInfo struct { func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *Message) (*Message, error) { key := u.Key(pmes.GetKey()) - u.DOut("[%s] Adding [%s] as a provider for '%s'\n", - dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty()) + log.Debug("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key)) dht.providers.AddProvider(key, p) return pmes, nil // send back same msg as confirmation. 
@@ -191,7 +192,7 @@ func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *Message) (*Message, err for _, ps := range seq { _, err := msg.FromObject(ps, pmes) if err != nil { - u.PErr("handleDiagnostics error creating message: %v\n", err) + log.Error("handleDiagnostics error creating message: %v\n", err) continue } // dht.sender.SendRequest(context.TODO(), mes) diff --git a/routing/dht/query.go b/routing/dht/query.go index 4db3f70e723..0a9ca0bd830 100644 --- a/routing/dht/query.go +++ b/routing/dht/query.go @@ -101,6 +101,12 @@ func newQueryRunner(ctx context.Context, q *dhtQuery) *dhtQueryRunner { } func (r *dhtQueryRunner) Run(peers []*peer.Peer) (*dhtQueryResult, error) { + log.Debug("Run query with %d peers.", len(peers)) + if len(peers) == 0 { + log.Warning("Running query with no peers!") + return nil, nil + } + // setup concurrency rate limiting for i := 0; i < r.query.concurrency; i++ { r.rateLimit <- struct{}{} @@ -145,7 +151,7 @@ func (r *dhtQueryRunner) Run(peers []*peer.Peer) (*dhtQueryResult, error) { func (r *dhtQueryRunner) addPeerToQuery(next *peer.Peer, benchmark *peer.Peer) { if next == nil { // wtf why are peers nil?!? - u.PErr("Query getting nil peers!!!\n") + log.Error("Query getting nil peers!!!\n") return } @@ -164,7 +170,7 @@ func (r *dhtQueryRunner) addPeerToQuery(next *peer.Peer, benchmark *peer.Peer) { r.peersSeen[next.Key()] = next r.Unlock() - u.DOut("adding peer to query: %v\n", next.ID.Pretty()) + log.Debug("adding peer to query: %v\n", next) // do this after unlocking to prevent possible deadlocks. r.peersRemaining.Increment(1) @@ -188,14 +194,14 @@ func (r *dhtQueryRunner) spawnWorkers() { if !more { return // channel closed. } - u.DOut("spawning worker for: %v\n", p.ID.Pretty()) + log.Debug("spawning worker for: %v\n", p) go r.queryPeer(p) } } } func (r *dhtQueryRunner) queryPeer(p *peer.Peer) { - u.DOut("spawned worker for: %v\n", p.ID.Pretty()) + log.Debug("spawned worker for: %v\n", p) // make sure we rate limit concurrency. 
select { @@ -205,33 +211,33 @@ func (r *dhtQueryRunner) queryPeer(p *peer.Peer) { return } - u.DOut("running worker for: %v\n", p.ID.Pretty()) + log.Debug("running worker for: %v\n", p) // finally, run the query against this peer res, err := r.query.qfunc(r.ctx, p) if err != nil { - u.DOut("ERROR worker for: %v %v\n", p.ID.Pretty(), err) + log.Debug("ERROR worker for: %v %v\n", p, err) r.Lock() r.errs = append(r.errs, err) r.Unlock() } else if res.success { - u.DOut("SUCCESS worker for: %v\n", p.ID.Pretty(), res) + log.Debug("SUCCESS worker for: %v\n", p, res) r.Lock() r.result = res r.Unlock() r.cancel() // signal to everyone that we're done. } else if res.closerPeers != nil { - u.DOut("PEERS CLOSER -- worker for: %v\n", p.ID.Pretty()) + log.Debug("PEERS CLOSER -- worker for: %v\n", p) for _, next := range res.closerPeers { r.addPeerToQuery(next, p) } } // signal we're done proccessing peer p - u.DOut("completing worker for: %v\n", p.ID.Pretty()) + log.Debug("completing worker for: %v\n", p) r.peersRemaining.Decrement(1) r.rateLimit <- struct{}{} } diff --git a/routing/dht/routing.go b/routing/dht/routing.go index 66ae0984879..d29a46fef0e 100644 --- a/routing/dht/routing.go +++ b/routing/dht/routing.go @@ -18,16 +18,20 @@ import ( // PutValue adds value corresponding to given Key. // This is the top level "Store" operation of the DHT func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error { - peers := []*peer.Peer{} + log.Debug("PutValue %s", key) + err := dht.putLocal(key, value) + if err != nil { + return err + } - // get the peers we need to announce to + var peers []*peer.Peer for _, route := range dht.routingTables { npeers := route.NearestPeers(kb.ConvertKey(key), KValue) peers = append(peers, npeers...) 
} query := newQuery(key, func(ctx context.Context, p *peer.Peer) (*dhtQueryResult, error) { - u.DOut("[%s] PutValue qry part %v\n", dht.self.ID.Pretty(), p.ID.Pretty()) + log.Debug("%s PutValue qry part %v", dht.self, p) err := dht.putValueToNetwork(ctx, p, string(key), value) if err != nil { return nil, err @@ -35,8 +39,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error return &dhtQueryResult{success: true}, nil }) - _, err := query.Run(ctx, peers) - u.DOut("[%s] PutValue %v %v\n", dht.self.ID.Pretty(), key, value) + _, err = query.Run(ctx, peers) return err } @@ -44,14 +47,13 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error // If the search does not succeed, a multiaddr string of a closer peer is // returned along with util.ErrSearchIncomplete func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) { - ll := startNewRPC("GET") - defer ll.EndAndPrint() + log.Debug("Get Value [%s]", key) // If we have it local, dont bother doing an RPC! // NOTE: this might not be what we want to do... 
val, err := dht.getLocal(key) if err == nil { - ll.Success = true + log.Debug("Got value locally!") return val, nil } @@ -84,7 +86,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) { return nil, err } - u.DOut("[%s] GetValue %v %v\n", dht.self.ID.Pretty(), key, result.value) + log.Debug("GetValue %v %v", key, result.value) if result.value == nil { return nil, u.ErrNotFound } @@ -151,15 +153,14 @@ func (dht *IpfsDHT) FindProvidersAsync2(ctx context.Context, key u.Key, count in peers := dht.routingTables[0].NearestPeers(kb.ConvertKey(key), AlphaValue) for _, pp := range peers { - ppp := pp - go func() { - pmes, err := dht.findProvidersSingle(ctx, ppp, key, 0) + go func(p *peer.Peer) { + pmes, err := dht.findProvidersSingle(ctx, p, key, 0) if err != nil { u.PErr("%v\n", err) return } dht.addPeerListAsync(key, pmes.GetProviderPeers(), ps, count, peerOut) - }() + }(pp) } }() @@ -187,13 +188,11 @@ func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*Message_Peer, ps *peerSet // FindProviders searches for peers who can provide the value for given key. func (dht *IpfsDHT) FindProviders(ctx context.Context, key u.Key) ([]*peer.Peer, error) { - ll := startNewRPC("FindProviders") - ll.EndAndPrint() - // get closest peer - u.DOut("Find providers for: '%s'\n", key) + log.Debug("Find providers for: '%s'", key) p := dht.routingTables[0].NearestPeer(kb.ConvertKey(key)) if p == nil { + log.Warning("Got no nearest peer for find providers: '%s'", key) return nil, nil } @@ -335,17 +334,17 @@ func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (*peer.Pee // Ping a peer, log the time it took func (dht *IpfsDHT) Ping(ctx context.Context, p *peer.Peer) error { // Thoughts: maybe this should accept an ID and do a peer lookup? 
- u.DOut("[%s] ping %s start\n", dht.self.ID.Pretty(), p.ID.Pretty()) + log.Info("ping %s start", p) pmes := newMessage(Message_PING, "", 0) _, err := dht.sendRequest(ctx, p, pmes) - u.DOut("[%s] ping %s end (err = %s)\n", dht.self.ID.Pretty(), p.ID.Pretty(), err) + log.Info("ping %s end (err = %s)", p, err) return err } func (dht *IpfsDHT) getDiagnostic(ctx context.Context) ([]*diagInfo, error) { - u.DOut("Begin Diagnostic") + log.Info("Begin Diagnostic") peers := dht.routingTables[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10) var out []*diagInfo diff --git a/routing/kbucket/table.go b/routing/kbucket/table.go index 2a0f16d1a0b..242546ba403 100644 --- a/routing/kbucket/table.go +++ b/routing/kbucket/table.go @@ -11,6 +11,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) +var log = u.Logger("table") + // RoutingTable defines the routing table. type RoutingTable struct { @@ -138,6 +140,8 @@ func (rt *RoutingTable) NearestPeer(id ID) *peer.Peer { if len(peers) > 0 { return peers[0] } + + log.Error("NearestPeer: Returning nil, table size = %d", rt.Size()) return nil } diff --git a/routing/kbucket/table_test.go b/routing/kbucket/table_test.go index 49be52c6553..cc1cdfba1da 100644 --- a/routing/kbucket/table_test.go +++ b/routing/kbucket/table_test.go @@ -101,7 +101,7 @@ func TestTableFind(t *testing.T) { rt.Update(peers[i]) } - t.Logf("Searching for peer: '%s'", peers[2].ID.Pretty()) + t.Logf("Searching for peer: '%s'", peers[2]) found := rt.NearestPeer(ConvertPeerID(peers[2].ID)) if !found.ID.Equal(peers[2].ID) { t.Fatalf("Failed to lookup known node...") @@ -118,7 +118,7 @@ func TestTableFindMultiple(t *testing.T) { rt.Update(peers[i]) } - t.Logf("Searching for peer: '%s'", peers[2].ID.Pretty()) + t.Logf("Searching for peer: '%s'", peers[2]) found := rt.NearestPeers(ConvertPeerID(peers[2].ID), 15) if len(found) != 15 { t.Fatalf("Got back different number of peers than we expected.") diff --git a/routing/keyspace/xor_test.go b/routing/keyspace/xor_test.go index 
d7d83afa206..7963ea014a8 100644 --- a/routing/keyspace/xor_test.go +++ b/routing/keyspace/xor_test.go @@ -113,8 +113,8 @@ func TestDistancesAndCenterSorting(t *testing.T) { keys[i] = Key{Space: XORKeySpace, Bytes: a} } - cmp := func(a int, b *big.Int) int { - return big.NewInt(int64(a)).Cmp(b) + cmp := func(a int64, b *big.Int) int { + return big.NewInt(a).Cmp(b) } if 0 != cmp(0, keys[2].Distance(keys[3])) { diff --git a/routing/mock/routing.go b/routing/mock/routing.go index e5fdb96fca0..954914c3bed 100644 --- a/routing/mock/routing.go +++ b/routing/mock/routing.go @@ -33,11 +33,11 @@ func (mr *MockRouter) SetRoutingServer(rs RoutingServer) { } func (mr *MockRouter) PutValue(ctx context.Context, key u.Key, val []byte) error { - return mr.datastore.Put(ds.NewKey(string(key)), val) + return mr.datastore.Put(key.DsKey(), val) } func (mr *MockRouter) GetValue(ctx context.Context, key u.Key) ([]byte, error) { - v, err := mr.datastore.Get(ds.NewKey(string(key))) + v, err := mr.datastore.Get(key.DsKey()) if err != nil { return nil, err } diff --git a/server/http/http.go b/server/http/http.go index 75623d5f2ab..14eff9cd204 100644 --- a/server/http/http.go +++ b/server/http/http.go @@ -17,14 +17,14 @@ type handler struct { } // Serve starts the http server -func Serve(address *ma.Multiaddr, node *core.IpfsNode) error { +func Serve(address ma.Multiaddr, node *core.IpfsNode) error { r := mux.NewRouter() handler := &handler{&ipfsHandler{node}} r.HandleFunc("/ipfs/", handler.postHandler).Methods("POST") r.PathPrefix("/ipfs/").Handler(handler).Methods("GET") http.Handle("/", r) - _, host, err := address.DialArgs() + _, host, err := ma.DialArgs(address) if err != nil { return err } diff --git a/server/http/ipfs.go b/server/http/ipfs.go index 6a177023f8d..bd094bada9e 100644 --- a/server/http/ipfs.go +++ b/server/http/ipfs.go @@ -6,6 +6,7 @@ import ( core "github.com/jbenet/go-ipfs/core" "github.com/jbenet/go-ipfs/importer" dag "github.com/jbenet/go-ipfs/merkledag" + uio 
"github.com/jbenet/go-ipfs/unixfs/io" u "github.com/jbenet/go-ipfs/util" ) @@ -33,5 +34,5 @@ func (i *ipfsHandler) AddNodeToDAG(nd *dag.Node) (u.Key, error) { } func (i *ipfsHandler) NewDagReader(nd *dag.Node) (io.Reader, error) { - return dag.NewDagReader(nd, i.node.DAG) + return uio.NewDagReader(nd, i.node.DAG) } diff --git a/unixfs/Makefile b/unixfs/Makefile new file mode 100644 index 00000000000..87f182fe5bf --- /dev/null +++ b/unixfs/Makefile @@ -0,0 +1,5 @@ +all: data.pb.go + +data.pb.go: data.proto + protoc --go_out=. data.proto + diff --git a/merkledag/data.pb.go b/unixfs/data.pb.go similarity index 73% rename from merkledag/data.pb.go rename to unixfs/data.pb.go index d2f97d33fd0..2efdd8a4ca4 100644 --- a/merkledag/data.pb.go +++ b/unixfs/data.pb.go @@ -3,7 +3,7 @@ // DO NOT EDIT! /* -Package merkledag is a generated protocol buffer package. +Package unixfs is a generated protocol buffer package. It is generated from these files: data.proto @@ -11,7 +11,7 @@ It is generated from these files: It has these top-level messages: PBData */ -package merkledag +package unixfs import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" @@ -57,8 +57,10 @@ func (x *PBData_DataType) UnmarshalJSON(data []byte) error { } type PBData struct { - Type *PBData_DataType `protobuf:"varint,1,req,enum=merkledag.PBData_DataType" json:"Type,omitempty"` + Type *PBData_DataType `protobuf:"varint,1,req,enum=unixfs.PBData_DataType" json:"Type,omitempty"` Data []byte `protobuf:"bytes,2,opt" json:"Data,omitempty"` + Filesize *uint64 `protobuf:"varint,3,opt,name=filesize" json:"filesize,omitempty"` + Blocksizes []uint64 `protobuf:"varint,4,rep,name=blocksizes" json:"blocksizes,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -80,6 +82,20 @@ func (m *PBData) GetData() []byte { return nil } +func (m *PBData) GetFilesize() uint64 { + if m != nil && m.Filesize != nil { + return *m.Filesize + } + return 0 +} + +func (m *PBData) 
GetBlocksizes() []uint64 { + if m != nil { + return m.Blocksizes + } + return nil +} + func init() { - proto.RegisterEnum("merkledag.PBData_DataType", PBData_DataType_name, PBData_DataType_value) + proto.RegisterEnum("unixfs.PBData_DataType", PBData_DataType_name, PBData_DataType_value) } diff --git a/merkledag/data.proto b/unixfs/data.proto similarity index 62% rename from merkledag/data.proto rename to unixfs/data.proto index 99c8a224b75..b9504b0c324 100644 --- a/merkledag/data.proto +++ b/unixfs/data.proto @@ -1,4 +1,4 @@ -package merkledag; +package unixfs; message PBData { enum DataType { @@ -9,4 +9,6 @@ message PBData { required DataType Type = 1; optional bytes Data = 2; + optional uint64 filesize = 3; + repeated uint64 blocksizes = 4; } diff --git a/unixfs/format.go b/unixfs/format.go new file mode 100644 index 00000000000..6ba8e3aa475 --- /dev/null +++ b/unixfs/format.go @@ -0,0 +1,119 @@ +// Package format implements a data format for files in the ipfs filesystem +// It is not the only format in ipfs, but it is the one that the filesystem assumes +package unixfs + +import ( + "errors" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +var ErrMalformedFileFormat = errors.New("malformed data in file format") +var ErrInvalidDirLocation = errors.New("found directory node in unexpected place") +var ErrUnrecognizedType = errors.New("unrecognized node type") + +func FromBytes(data []byte) (*PBData, error) { + pbdata := new(PBData) + err := proto.Unmarshal(data, pbdata) + if err != nil { + return nil, err + } + return pbdata, nil +} + +func FilePBData(data []byte, totalsize uint64) []byte { + pbfile := new(PBData) + typ := PBData_File + pbfile.Type = &typ + pbfile.Data = data + pbfile.Filesize = proto.Uint64(totalsize) + + data, err := proto.Marshal(pbfile) + if err != nil { + // This really shouldnt happen, i promise + // The only failure case for marshal is if required fields + // are not filled out, and they all are. 
If the proto object + // gets changed and nobody updates this function, the code + // should panic due to programmer error + panic(err) + } + return data +} + +// Returns Bytes that represent a Directory +func FolderPBData() []byte { + pbfile := new(PBData) + typ := PBData_Directory + pbfile.Type = &typ + + data, err := proto.Marshal(pbfile) + if err != nil { + //this really shouldnt happen, i promise + panic(err) + } + return data +} + +func WrapData(b []byte) []byte { + pbdata := new(PBData) + typ := PBData_Raw + pbdata.Data = b + pbdata.Type = &typ + + out, err := proto.Marshal(pbdata) + if err != nil { + // This shouldnt happen. seriously. + panic(err) + } + + return out +} + +func UnwrapData(data []byte) ([]byte, error) { + pbdata := new(PBData) + err := proto.Unmarshal(data, pbdata) + if err != nil { + return nil, err + } + return pbdata.GetData(), nil +} + +func DataSize(data []byte) (uint64, error) { + pbdata := new(PBData) + err := proto.Unmarshal(data, pbdata) + if err != nil { + return 0, err + } + + switch pbdata.GetType() { + case PBData_Directory: + return 0, errors.New("Cant get data size of directory!") + case PBData_File: + return pbdata.GetFilesize(), nil + case PBData_Raw: + return uint64(len(pbdata.GetData())), nil + default: + return 0, errors.New("Unrecognized node data type!") + } +} + +type MultiBlock struct { + Data []byte + blocksizes []uint64 + subtotal uint64 +} + +func (mb *MultiBlock) AddBlockSize(s uint64) { + mb.subtotal += s + mb.blocksizes = append(mb.blocksizes, s) +} + +func (mb *MultiBlock) GetBytes() ([]byte, error) { + pbn := new(PBData) + t := PBData_File + pbn.Type = &t + pbn.Filesize = proto.Uint64(uint64(len(mb.Data)) + mb.subtotal) + pbn.Blocksizes = mb.blocksizes + pbn.Data = mb.Data + return proto.Marshal(pbn) +} diff --git a/unixfs/format_test.go b/unixfs/format_test.go new file mode 100644 index 00000000000..eca926e9f18 --- /dev/null +++ b/unixfs/format_test.go @@ -0,0 +1,36 @@ +package unixfs + +import ( + "testing" 
+ + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +func TestMultiBlock(t *testing.T) { + mbf := new(MultiBlock) + for i := 0; i < 15; i++ { + mbf.AddBlockSize(100) + } + + mbf.Data = make([]byte, 128) + + b, err := mbf.GetBytes() + if err != nil { + t.Fatal(err) + } + + pbn := new(PBData) + err = proto.Unmarshal(b, pbn) + if err != nil { + t.Fatal(err) + } + + ds, err := DataSize(b) + if err != nil { + t.Fatal(err) + } + + if ds != (100*15)+128 { + t.Fatal("Datasize calculations incorrect!") + } +} diff --git a/unixfs/io/dagmodifier.go b/unixfs/io/dagmodifier.go new file mode 100644 index 00000000000..8680da46a27 --- /dev/null +++ b/unixfs/io/dagmodifier.go @@ -0,0 +1,192 @@ +package io + +import ( + "bytes" + "errors" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + + "github.com/jbenet/go-ipfs/importer/chunk" + mdag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" + u "github.com/jbenet/go-ipfs/util" +) + +// DagModifier is the only struct licensed and able to correctly +// perform surgery on a DAG 'file' +// Dear god, please rename this to something more pleasant +type DagModifier struct { + dagserv *mdag.DAGService + curNode *mdag.Node + + pbdata *ft.PBData + splitter chunk.BlockSplitter +} + +func NewDagModifier(from *mdag.Node, serv *mdag.DAGService, spl chunk.BlockSplitter) (*DagModifier, error) { + pbd, err := ft.FromBytes(from.Data) + if err != nil { + return nil, err + } + + return &DagModifier{ + curNode: from.Copy(), + dagserv: serv, + pbdata: pbd, + splitter: spl, + }, nil +} + +// WriteAt will modify a dag file in place +// NOTE: it currently assumes only a single level of indirection +func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) { + + // Check bounds + if dm.pbdata.GetFilesize() < offset { + return 0, errors.New("Attempted to perform write starting past end of file") + } + + // First need to find where we are 
writing at + end := uint64(len(b)) + offset + + // This shouldnt be necessary if we do subblocks sizes properly + newsize := dm.pbdata.GetFilesize() + if end > dm.pbdata.GetFilesize() { + newsize = end + } + zeroblocklen := uint64(len(dm.pbdata.Data)) + origlen := len(b) + + if end <= zeroblocklen { + log.Debug("Writing into zero block.") + // Replacing zeroeth data block (embedded in the root node) + //TODO: check chunking here + copy(dm.pbdata.Data[offset:], b) + return len(b), nil + } + + // Find where write should start + var traversed uint64 + startsubblk := len(dm.pbdata.Blocksizes) + if offset < zeroblocklen { + dm.pbdata.Data = dm.pbdata.Data[:offset] + startsubblk = 0 + } else { + traversed = uint64(zeroblocklen) + for i, size := range dm.pbdata.Blocksizes { + if uint64(offset) < traversed+size { + log.Debug("Starting mod at block %d. [%d < %d + %d]", i, offset, traversed, size) + // Here is where we start + startsubblk = i + lnk := dm.curNode.Links[i] + node, err := dm.dagserv.Get(u.Key(lnk.Hash)) + if err != nil { + return 0, err + } + data, err := ft.UnwrapData(node.Data) + if err != nil { + return 0, err + } + + // We have to rewrite the data before our write in this block. + b = append(data[:offset-traversed], b...) + break + } + traversed += size + } + if startsubblk == len(dm.pbdata.Blocksizes) { + // TODO: Im not sure if theres any case that isnt being handled here. + // leaving this note here as a future reference in case something breaks + } + } + + // Find blocks that need to be overwritten + var changed []int + mid := -1 + var midoff uint64 + for i, size := range dm.pbdata.Blocksizes[startsubblk:] { + if end > traversed { + changed = append(changed, i+startsubblk) + } else { + break + } + traversed += size + if end < traversed { + mid = i + startsubblk + midoff = end - (traversed - size) + break + } + } + + // If our write starts in the middle of a block... 
+ var midlnk *mdag.Link + if mid >= 0 { + midlnk = dm.curNode.Links[mid] + midnode, err := dm.dagserv.Get(u.Key(midlnk.Hash)) + if err != nil { + return 0, err + } + + // NOTE: this may have to be changed later when we have multiple + // layers of indirection + data, err := ft.UnwrapData(midnode.Data) + if err != nil { + return 0, err + } + b = append(b, data[midoff:]...) + } + + // Generate new sub-blocks, and sizes + subblocks := splitBytes(b, dm.splitter) + var links []*mdag.Link + var sizes []uint64 + for _, sb := range subblocks { + n := &mdag.Node{Data: ft.WrapData(sb)} + _, err := dm.dagserv.Add(n) + if err != nil { + log.Error("Failed adding node to DAG service: %s", err) + return 0, err + } + lnk, err := mdag.MakeLink(n) + if err != nil { + return 0, err + } + links = append(links, lnk) + sizes = append(sizes, uint64(len(sb))) + } + + // This is disgusting (and can be rewritten if performance demands) + if len(changed) > 0 { + sechalflink := append(links, dm.curNode.Links[changed[len(changed)-1]+1:]...) + dm.curNode.Links = append(dm.curNode.Links[:changed[0]], sechalflink...) + sechalfblks := append(sizes, dm.pbdata.Blocksizes[changed[len(changed)-1]+1:]...) + dm.pbdata.Blocksizes = append(dm.pbdata.Blocksizes[:changed[0]], sechalfblks...) + } else { + dm.curNode.Links = append(dm.curNode.Links, links...) + dm.pbdata.Blocksizes = append(dm.pbdata.Blocksizes, sizes...) 
+ } + dm.pbdata.Filesize = proto.Uint64(newsize) + + return origlen, nil +} + +// splitBytes uses a splitterFunc to turn a large array of bytes +// into many smaller arrays of bytes +func splitBytes(b []byte, spl chunk.BlockSplitter) [][]byte { + out := spl.Split(bytes.NewReader(b)) + var arr [][]byte + for blk := range out { + arr = append(arr, blk) + } + return arr +} + +// GetNode gets the modified DAG Node +func (dm *DagModifier) GetNode() (*mdag.Node, error) { + b, err := proto.Marshal(dm.pbdata) + if err != nil { + return nil, err + } + dm.curNode.Data = b + return dm.curNode.Copy(), nil +} diff --git a/unixfs/io/dagmodifier_test.go b/unixfs/io/dagmodifier_test.go new file mode 100644 index 00000000000..32d9a84b5da --- /dev/null +++ b/unixfs/io/dagmodifier_test.go @@ -0,0 +1,200 @@ +package io + +import ( + "fmt" + "io" + "io/ioutil" + "testing" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging" + bs "github.com/jbenet/go-ipfs/blockservice" + "github.com/jbenet/go-ipfs/importer/chunk" + mdag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" + u "github.com/jbenet/go-ipfs/util" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" +) + +func getMockDagServ(t *testing.T) *mdag.DAGService { + dstore := ds.NewMapDatastore() + bserv, err := bs.NewBlockService(dstore, nil) + if err != nil { + t.Fatal(err) + } + return &mdag.DAGService{bserv} +} + +func getNode(t *testing.T, dserv *mdag.DAGService, size int64) ([]byte, *mdag.Node) { + dw := NewDagWriter(dserv, &chunk.SizeSplitter{500}) + + n, err := io.CopyN(dw, u.NewFastRand(), size) + if err != nil { + t.Fatal(err) + } + if n != size { + t.Fatal("Incorrect copy amount!") + } + + dw.Close() + node := dw.GetNode() + + dr, err := NewDagReader(node, dserv) + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadAll(dr) + if err != nil { + t.Fatal(err) + } + + return b, node +} + +func testModWrite(t *testing.T, beg, size 
uint64, orig []byte, dm *DagModifier) []byte { + newdata := make([]byte, size) + r := u.NewFastRand() + r.Read(newdata) + + if size+beg > uint64(len(orig)) { + orig = append(orig, make([]byte, (size+beg)-uint64(len(orig)))...) + } + copy(orig[beg:], newdata) + + nmod, err := dm.WriteAt(newdata, uint64(beg)) + if err != nil { + t.Fatal(err) + } + + if nmod != int(size) { + t.Fatalf("Mod length not correct! %d != %d", nmod, size) + } + + nd, err := dm.GetNode() + if err != nil { + t.Fatal(err) + } + + rd, err := NewDagReader(nd, dm.dagserv) + if err != nil { + t.Fatal(err) + } + + after, err := ioutil.ReadAll(rd) + if err != nil { + t.Fatal(err) + } + + err = arrComp(after, orig) + if err != nil { + t.Fatal(err) + } + return orig +} + +func TestDagModifierBasic(t *testing.T) { + logging.SetLevel(logging.CRITICAL, "blockservice") + logging.SetLevel(logging.CRITICAL, "merkledag") + dserv := getMockDagServ(t) + b, n := getNode(t, dserv, 50000) + + dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512}) + if err != nil { + t.Fatal(err) + } + + // Within zero block + beg := uint64(15) + length := uint64(60) + + t.Log("Testing mod within zero block") + b = testModWrite(t, beg, length, b, dagmod) + + // Within bounds of existing file + beg = 1000 + length = 4000 + t.Log("Testing mod within bounds of existing file.") + b = testModWrite(t, beg, length, b, dagmod) + + // Extend bounds + beg = 49500 + length = 4000 + + t.Log("Testing mod that extends file.") + b = testModWrite(t, beg, length, b, dagmod) + + // "Append" + beg = uint64(len(b)) + length = 3000 + b = testModWrite(t, beg, length, b, dagmod) + + // Verify reported length + node, err := dagmod.GetNode() + if err != nil { + t.Fatal(err) + } + + size, err := ft.DataSize(node.Data) + if err != nil { + t.Fatal(err) + } + + expected := uint64(50000 + 3500 + 3000) + if size != expected { + t.Fatal("Final reported size is incorrect [%d != %d]", size, expected) + } +} + +func TestMultiWrite(t *testing.T) { + dserv 
:= getMockDagServ(t) + _, n := getNode(t, dserv, 0) + + dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512}) + if err != nil { + t.Fatal(err) + } + + data := make([]byte, 4000) + u.NewFastRand().Read(data) + + for i := 0; i < len(data); i++ { + n, err := dagmod.WriteAt(data[i:i+1], uint64(i)) + if err != nil { + t.Fatal(err) + } + if n != 1 { + t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)") + } + } + nd, err := dagmod.GetNode() + if err != nil { + t.Fatal(err) + } + + read, err := NewDagReader(nd, dserv) + if err != nil { + t.Fatal(err) + } + rbuf, err := ioutil.ReadAll(read) + if err != nil { + t.Fatal(err) + } + + err = arrComp(rbuf, data) + if err != nil { + t.Fatal(err) + } +} + +func arrComp(a, b []byte) error { + if len(a) != len(b) { + return fmt.Errorf("Arrays differ in length. %d != %d", len(a), len(b)) + } + for i, v := range a { + if v != b[i] { + return fmt.Errorf("Arrays differ at index: %d", i) + } + } + return nil +} diff --git a/merkledag/dagreader.go b/unixfs/io/dagreader.go similarity index 67% rename from merkledag/dagreader.go rename to unixfs/io/dagreader.go index 1e8a0c8b9ad..29196a1e3f1 100644 --- a/merkledag/dagreader.go +++ b/unixfs/io/dagreader.go @@ -1,4 +1,4 @@ -package merkledag +package io import ( "bytes" @@ -6,6 +6,8 @@ import ( "io" proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + mdag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" u "github.com/jbenet/go-ipfs/util" ) @@ -13,34 +15,41 @@ var ErrIsDir = errors.New("this dag node is a directory") // DagReader provides a way to easily read the data contained in a dag. 
type DagReader struct { - serv *DAGService - node *Node + serv *mdag.DAGService + node *mdag.Node position int buf *bytes.Buffer } -func NewDagReader(n *Node, serv *DAGService) (io.Reader, error) { - pb := new(PBData) +// NewDagReader creates a new reader object that reads the data represented by the given +// node, using the passed in DAGService for data retreival +func NewDagReader(n *mdag.Node, serv *mdag.DAGService) (io.Reader, error) { + pb := new(ft.PBData) err := proto.Unmarshal(n.Data, pb) if err != nil { return nil, err } + switch pb.GetType() { - case PBData_Directory: + case ft.PBData_Directory: + // Dont allow reading directories return nil, ErrIsDir - case PBData_File: + case ft.PBData_File: return &DagReader{ node: n, serv: serv, buf: bytes.NewBuffer(pb.GetData()), }, nil - case PBData_Raw: + case ft.PBData_Raw: + // Raw block will just be a single level, return a byte buffer return bytes.NewBuffer(pb.GetData()), nil default: - panic("Unrecognized node type!") + return nil, ft.ErrUnrecognizedType } } +// Follows the next link in line and loads it from the DAGService, +// setting the next buffer to read from func (dr *DagReader) precalcNextBuf() error { if dr.position >= len(dr.node.Links) { return io.EOF @@ -54,7 +63,7 @@ func (dr *DagReader) precalcNextBuf() error { } nxt = nxtNode } - pb := new(PBData) + pb := new(ft.PBData) err := proto.Unmarshal(nxt.Data, pb) if err != nil { return err @@ -62,21 +71,22 @@ func (dr *DagReader) precalcNextBuf() error { dr.position++ switch pb.GetType() { - case PBData_Directory: - panic("Why is there a directory under a file?") - case PBData_File: + case ft.PBData_Directory: + return ft.ErrInvalidDirLocation + case ft.PBData_File: //TODO: this *should* work, needs testing first //return NewDagReader(nxt, dr.serv) panic("Not yet handling different layers of indirection!") - case PBData_Raw: + case ft.PBData_Raw: dr.buf = bytes.NewBuffer(pb.GetData()) return nil default: - panic("Unrecognized node type!") + return 
ft.ErrUnrecognizedType } } func (dr *DagReader) Read(b []byte) (int, error) { + // If no cached buffer, load one if dr.buf == nil { err := dr.precalcNextBuf() if err != nil { @@ -85,16 +95,22 @@ func (dr *DagReader) Read(b []byte) (int, error) { } total := 0 for { + // Attempt to fill bytes from cached buffer n, err := dr.buf.Read(b[total:]) total += n if err != nil { + // EOF is expected if err != io.EOF { return total, err } } + + // If weve read enough bytes, return if total == len(b) { return total, nil } + + // Otherwise, load up the next block err = dr.precalcNextBuf() if err != nil { return total, err diff --git a/unixfs/io/dagwriter.go b/unixfs/io/dagwriter.go new file mode 100644 index 00000000000..4abb1b36c76 --- /dev/null +++ b/unixfs/io/dagwriter.go @@ -0,0 +1,107 @@ +package io + +import ( + "github.com/jbenet/go-ipfs/importer/chunk" + dag "github.com/jbenet/go-ipfs/merkledag" + ft "github.com/jbenet/go-ipfs/unixfs" + "github.com/jbenet/go-ipfs/util" +) + +var log = util.Logger("dagwriter") + +type DagWriter struct { + dagserv *dag.DAGService + node *dag.Node + totalSize int64 + splChan chan []byte + done chan struct{} + splitter chunk.BlockSplitter + seterr error +} + +func NewDagWriter(ds *dag.DAGService, splitter chunk.BlockSplitter) *DagWriter { + dw := new(DagWriter) + dw.dagserv = ds + dw.splChan = make(chan []byte, 8) + dw.splitter = splitter + dw.done = make(chan struct{}) + go dw.startSplitter() + return dw +} + +// startSplitter manages splitting incoming bytes and +// creating dag nodes from them. Created nodes are stored +// in the DAGService and then released to the GC. +func (dw *DagWriter) startSplitter() { + + // Since the splitter functions take a reader (and should!) 
+ // we wrap our byte chan input in a reader + r := util.NewByteChanReader(dw.splChan) + blkchan := dw.splitter.Split(r) + + // First data block is reserved for storage in the root node + first := <-blkchan + mbf := new(ft.MultiBlock) + root := new(dag.Node) + + for blkData := range blkchan { + // Store the block size in the root node + mbf.AddBlockSize(uint64(len(blkData))) + node := &dag.Node{Data: ft.WrapData(blkData)} + _, err := dw.dagserv.Add(node) + if err != nil { + dw.seterr = err + log.Critical("Got error adding created node to dagservice: %s", err) + return + } + + // Add a link to this node without storing a reference to the memory + err = root.AddNodeLinkClean("", node) + if err != nil { + dw.seterr = err + log.Critical("Got error adding created node to root node: %s", err) + return + } + } + + // Generate the root node data + mbf.Data = first + data, err := mbf.GetBytes() + if err != nil { + dw.seterr = err + log.Critical("Failed generating bytes for multiblock file: %s", err) + return + } + root.Data = data + + // Add root node to the dagservice + _, err = dw.dagserv.Add(root) + if err != nil { + dw.seterr = err + log.Critical("Got error adding created node to dagservice: %s", err) + return + } + dw.node = root + dw.done <- struct{}{} +} + +func (dw *DagWriter) Write(b []byte) (int, error) { + if dw.seterr != nil { + return 0, dw.seterr + } + dw.splChan <- b + return len(b), nil +} + +// Close the splitters input channel and wait for it to finish +// Must be called to finish up splitting, otherwise split method +// will never halt +func (dw *DagWriter) Close() error { + close(dw.splChan) + <-dw.done + return nil +} + +func (dw *DagWriter) GetNode() *dag.Node { + return dw.node +} diff --git a/unixfs/io/dagwriter_test.go b/unixfs/io/dagwriter_test.go new file mode 100644 index 00000000000..73ba5c4e910 --- /dev/null +++ b/unixfs/io/dagwriter_test.go @@ -0,0 +1,127 @@ +package io + +import ( + "testing" + + "io" + + ds 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + bs "github.com/jbenet/go-ipfs/blockservice" + chunk "github.com/jbenet/go-ipfs/importer/chunk" + mdag "github.com/jbenet/go-ipfs/merkledag" +) + +type datasource struct { + i int +} + +func (d *datasource) Read(b []byte) (int, error) { + for i, _ := range b { + b[i] = byte(d.i % 256) + d.i++ + } + return len(b), nil +} + +func (d *datasource) Matches(t *testing.T, r io.Reader, length int) bool { + b := make([]byte, 100) + i := 0 + for { + n, err := r.Read(b) + if err != nil && err != io.EOF { + t.Fatal(err) + } + for _, v := range b[:n] { + if v != byte(i%256) { + t.Fatalf("Buffers differed at byte: %d (%d != %d)", i, v, (i % 256)) + } + i++ + } + if err == io.EOF { + break + } + } + if i != length { + t.Fatalf("Incorrect length. (%d != %d)", i, length) + } + return true +} + +func TestDagWriter(t *testing.T) { + dstore := ds.NewMapDatastore() + bserv, err := bs.NewBlockService(dstore, nil) + if err != nil { + t.Fatal(err) + } + dag := &mdag.DAGService{bserv} + dw := NewDagWriter(dag, &chunk.SizeSplitter{4096}) + + nbytes := int64(1024 * 1024 * 2) + n, err := io.CopyN(dw, &datasource{}, nbytes) + if err != nil { + t.Fatal(err) + } + + if n != nbytes { + t.Fatal("Copied incorrect amount of bytes!") + } + + dw.Close() + + node := dw.GetNode() + read, err := NewDagReader(node, dag) + if err != nil { + t.Fatal(err) + } + + d := &datasource{} + if !d.Matches(t, read, int(nbytes)) { + t.Fatal("Failed to validate!") + } +} + +func TestMassiveWrite(t *testing.T) { + t.SkipNow() + dstore := ds.NewNullDatastore() + bserv, err := bs.NewBlockService(dstore, nil) + if err != nil { + t.Fatal(err) + } + dag := &mdag.DAGService{bserv} + dw := NewDagWriter(dag, &chunk.SizeSplitter{4096}) + + nbytes := int64(1024 * 1024 * 1024 * 16) + n, err := io.CopyN(dw, &datasource{}, nbytes) + if err != nil { + t.Fatal(err) + } + if n != nbytes { + t.Fatal("Incorrect copy size.") + } + dw.Close() +} + +func 
BenchmarkDagWriter(b *testing.B) { + dstore := ds.NewNullDatastore() + bserv, err := bs.NewBlockService(dstore, nil) + if err != nil { + b.Fatal(err) + } + dag := &mdag.DAGService{bserv} + + b.ResetTimer() + nbytes := int64(100000) + for i := 0; i < b.N; i++ { + b.SetBytes(nbytes) + dw := NewDagWriter(dag, &chunk.SizeSplitter{4096}) + n, err := io.CopyN(dw, &datasource{}, nbytes) + if err != nil { + b.Fatal(err) + } + if n != nbytes { + b.Fatal("Incorrect copy size.") + } + dw.Close() + } + +} diff --git a/util/testutil/blocks.go b/util/testutil/blocks.go deleted file mode 100644 index 05b69239998..00000000000 --- a/util/testutil/blocks.go +++ /dev/null @@ -1,22 +0,0 @@ -package testutil - -import ( - "testing" - - blocks "github.com/jbenet/go-ipfs/blocks" -) - -// NewBlockOrFail returns a block created from msgData. Signals test failure if -// creation fails. -// -// NB: NewBlockOrFail accepts a msgData parameter to avoid non-determinism in -// tests. Generating random block data could potentially result in unexpected -// behavior in tests. Thus, it is left up to the caller to select the msgData -// that will determine the blocks key. 
-func NewBlockOrFail(t *testing.T, msgData string) blocks.Block { - block, blockCreationErr := blocks.NewBlock([]byte(msgData)) - if blockCreationErr != nil { - t.Fatal(blockCreationErr) - } - return *block -} diff --git a/util/util.go b/util/util.go index 41f6afede47..dfc0836beb3 100644 --- a/util/util.go +++ b/util/util.go @@ -3,16 +3,27 @@ package util import ( "errors" "fmt" + "io" + "math/rand" "os" "os/user" "path/filepath" "strings" + "time" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging" ) +func init() { + SetupLogging() +} + +// LogFormat is the format used for our logger. +var LogFormat = "%{color}%{time:2006-01-02 15:04:05.999999} %{shortfile} %{level}: %{color:reset}%{message}" + // Debug is a global flag for debugging. var Debug bool @@ -32,14 +43,67 @@ var ErrNotFound = ds.ErrNotFound // Key is a string representation of multihash for use with maps. type Key string +// String is utililty function for printing out keys as strings (Pretty). +func (k Key) String() string { + return k.Pretty() +} + // Pretty returns Key in a b58 encoded string func (k Key) Pretty() string { return b58.Encode([]byte(k)) } +// DsKey returns a Datastore key +func (k Key) DsKey() ds.Key { + return ds.NewKey(string(k)) +} + +// KeyFromDsKey returns a Datastore key +func KeyFromDsKey(dsk ds.Key) Key { + return Key(dsk.BaseNamespace()) +} + +// DsKeyB58Encode returns a B58 encoded Datastore key +// TODO: this is hacky because it encodes every path component. some +// path components may be proper strings already... 
+func DsKeyB58Encode(dsk ds.Key) ds.Key { + k := ds.NewKey("/") + for _, n := range dsk.Namespaces() { + k = k.Child(b58.Encode([]byte(n))) + } + return k +} + +// DsKeyB58Decode returns a b58 decoded Datastore key +// TODO: this is hacky because it encodes every path component. some +// path components may be proper strings already... +func DsKeyB58Decode(dsk ds.Key) ds.Key { + k := ds.NewKey("/") + for _, n := range dsk.Namespaces() { + k = k.Child(string(b58.Decode(n))) + } + return k +} + // Hash is the global IPFS hash function. uses multihash SHA2_256, 256 bits -func Hash(data []byte) (mh.Multihash, error) { - return mh.Sum(data, mh.SHA2_256, -1) +func Hash(data []byte) mh.Multihash { + h, err := mh.Sum(data, mh.SHA2_256, -1) + if err != nil { + // this error can be safely ignored (panic) because multihash only fails + // from the selection of hash function. If the fn + length are valid, it + // won't error. + panic("multihash failed to hash using SHA2_256.") + } + return h +} + +// IsValidHash checks whether a given hash is valid (b58 decodable, len > 0) +func IsValidHash(s string) bool { + out := b58.Decode(s) + if out == nil || len(out) == 0 { + return false + } + return true } // TildeExpansion expands a filename, which may begin with a tilde. @@ -82,6 +146,37 @@ func DOut(format string, a ...interface{}) { } } +var loggers = map[string]*logging.Logger{} + +// SetupLogging will initialize the logger backend and set the flags. +func SetupLogging() { + backend := logging.NewLogBackend(os.Stderr, "", 0) + logging.SetBackend(backend) + logging.SetFormatter(logging.MustStringFormatter(LogFormat)) + + // just uncomment Debug = True right here for all logging. + // but please don't commit that. 
// byteChanReader wraps a byte chan in a reader
type byteChanReader struct {
	in  chan []byte // source of chunks; closed by the sender to signal EOF
	buf []byte      // leftover from a chunk that overfilled a previous Read
}

// NewByteChanReader returns an io.Reader that streams the byte slices
// received on in, yielding io.EOF once in is closed and drained.
func NewByteChanReader(in chan []byte) io.Reader {
	return &byteChanReader{in: in}
}

// Read fills b from buffered leftovers and then from the channel. It blocks
// until at least one chunk is available, and returns io.EOF when the channel
// is closed and no buffered bytes remain.
func (bcr *byteChanReader) Read(b []byte) (int, error) {
	// Refill the leftover buffer from the channel if it is empty.
	if len(bcr.buf) == 0 {
		data, ok := <-bcr.in
		if !ok {
			return 0, io.EOF
		}
		bcr.buf = data
	}

	// Fast path: the buffered chunk alone satisfies the whole read.
	if len(bcr.buf) >= len(b) {
		copy(b, bcr.buf)
		bcr.buf = bcr.buf[len(b):]
		return len(b), nil
	}

	// Slow path: drain the buffer, then keep pulling chunks until b is
	// full or the channel is closed.
	copy(b, bcr.buf)
	b = b[len(bcr.buf):]
	totread := len(bcr.buf)

	for data := range bcr.in {
		if len(data) > len(b) {
			// Chunk overfills b: copy what fits, stash the rest for the
			// next Read call.
			totread += len(b)
			copy(b, data[:len(b)])
			bcr.buf = data[len(b):]
			return totread, nil
		}
		copy(b, data)
		totread += len(data)
		b = b[len(data):]
		if len(b) == 0 {
			return totread, nil
		}
	}
	return totread, io.EOF
}
} + } + + panic("unreachable") +} diff --git a/util/util_test.go b/util/util_test.go index 821a97e66e4..a85c492feeb 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -2,8 +2,11 @@ package util import ( "bytes" - mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + "io/ioutil" + "math/rand" "testing" + + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ) func TestKey(t *testing.T) { @@ -25,3 +28,33 @@ func TestKey(t *testing.T) { t.Error("Keys not equal.") } } + +func TestByteChanReader(t *testing.T) { + data := make([]byte, 1024*1024) + r := NewFastRand() + r.Read(data) + dch := make(chan []byte, 8) + + go func() { + beg := 0 + for i := 0; i < len(data); { + i += rand.Intn(100) + 1 + if i > len(data) { + i = len(data) + } + dch <- data[beg:i] + beg = i + } + close(dch) + }() + + read := NewByteChanReader(dch) + out, err := ioutil.ReadAll(read) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(out, data) { + t.Fatal("Reader failed to stream correct bytes") + } +}