diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go index 6b1e0ab8b1..e0fb3e897c 100644 --- a/channeldb/channel_test.go +++ b/channeldb/channel_test.go @@ -98,13 +98,22 @@ func makeTestDB() (*DB, func(), error) { } // Next, create channeldb for the first time. - cdb, err := Open(tempDirName, OptionClock(testClock)) + backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb") if err != nil { + backendCleanup() + return nil, nil, err + } + + cdb, err := CreateWithBackend(backend, OptionClock(testClock)) + if err != nil { + backendCleanup() + os.RemoveAll(tempDirName) return nil, nil, err } cleanUp := func() { cdb.Close() + backendCleanup() os.RemoveAll(tempDirName) } diff --git a/channeldb/db.go b/channeldb/db.go index 43ad6cde41..b6c7daf5c6 100644 --- a/channeldb/db.go +++ b/channeldb/db.go @@ -6,11 +6,11 @@ import ( "fmt" "net" "os" - "path/filepath" "time" "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcwallet/walletdb" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/channeldb/migration12" @@ -148,38 +148,79 @@ var ( // schedules, and reputation data. type DB struct { kvdb.Backend + dbPath string graph *ChannelGraph clock clock.Clock dryRun bool } -// Open opens an existing channeldb. Any necessary schemas migrations due to -// updates will take place as necessary. -func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) { - path := filepath.Join(dbPath, dbName) +// Update is a wrapper around walletdb.Update which calls into the extended +// backend when available. This call is needed to be able to cast DB to +// ExtendedBackend. +func (db *DB) Update(f func(tx walletdb.ReadWriteTx) error) error { + if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { + return v.Update(f) + } + return walletdb.Update(db, f) +} - if !fileExists(path) { - if err := createChannelDB(dbPath); err != nil { - return nil, err - } +// View is a wrapper around walletdb.View which calls into the extended +// backend when available. This call is needed to be able to cast DB to +// ExtendedBackend. +func (db *DB) View(f func(tx walletdb.ReadTx) error) error { + if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { + return v.View(f) + } + + return walletdb.View(db, f) +} + +// PrintStats calls into the extended backend if available. This call is needed +// to be able to cast DB to ExtendedBackend. +func (db *DB) PrintStats() string { + if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { + return v.PrintStats() } + return "unimplemented" +} + +// Open opens or creates channeldb. Any necessary schemas migrations due +// to updates will take place as necessary. +// TODO(bhandras): deprecate this function. +func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) { opts := DefaultOptions() for _, modifier := range modifiers { modifier(&opts) } - // Specify bbolt freelist options to reduce heap pressure in case the - // freelist grows to be very large. - bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync) + backend, err := kvdb.GetBoltBackend(dbPath, dbName, opts.NoFreelistSync) if err != nil { return nil, err } + db, err := CreateWithBackend(backend, modifiers...) + if err == nil { + db.dbPath = dbPath + } + return db, err +} + +// CreateWithBackend creates channeldb instance using the passed kvdb.Backend. +// Any necessary schemas migrations due to updates will take place as necessary. 
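+//
+// A minimal usage sketch (this mirrors what Open does above; error handling
+// elided and names illustrative):
+//
+//	backend, err := kvdb.GetBoltBackend(dbPath, dbName, noFreelistSync)
+//	...
+//	db, err := CreateWithBackend(backend, OptionClock(clk))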
+func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB, error) { + if err := initChannelDB(backend); err != nil { + return nil, err + } + + opts := DefaultOptions() + for _, modifier := range modifiers { + modifier(&opts) + } + chanDB := &DB{ - Backend: bdb, - dbPath: dbPath, + Backend: backend, clock: opts.clock, dryRun: opts.dryRun, } @@ -189,7 +230,7 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) { // Synchronize the version of database and apply migrations if needed. if err := chanDB.syncVersions(dbVersions); err != nil { - bdb.Close() + backend.Close() return nil, err } @@ -251,20 +292,15 @@ func (d *DB) Wipe() error { // the case that the target path has not yet been created or doesn't yet exist, // then the path is created. Additionally, all required top-level buckets used // within the database are created. -func createChannelDB(dbPath string) error { - if !fileExists(dbPath) { - if err := os.MkdirAll(dbPath, 0700); err != nil { - return err +func initChannelDB(db kvdb.Backend) error { + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + meta := &Meta{} + // Check if DB is already initialized. + err := fetchMeta(meta, tx) + if err == nil { + return nil } - } - - path := filepath.Join(dbPath, dbName) - bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true) - if err != nil { - return err - } - err = kvdb.Update(bdb, func(tx kvdb.RwTx) error { if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil { return err } @@ -331,16 +367,14 @@ func createChannelDB(dbPath string) error { return err } - meta := &Meta{ - DbVersionNumber: getLatestDBVersion(dbVersions), - } + meta.DbVersionNumber = getLatestDBVersion(dbVersions) return putMeta(meta, tx) }) if err != nil { - return fmt.Errorf("unable to create new channeldb") + return fmt.Errorf("unable to create new channeldb: %v", err) } - return bdb.Close() + return nil } // fileExists returns true if the file exists, and false otherwise. @@ -373,7 +407,7 @@ func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) // stored currently active/open channels associated with the target nodeID. In // the case that no active channels are known to have been created with this // node, then a zero-length slice is returned. -func (d *DB) fetchOpenChannels(tx kvdb.ReadTx, +func (db *DB) fetchOpenChannels(tx kvdb.ReadTx, nodeID *btcec.PublicKey) ([]*OpenChannel, error) { // Get the bucket dedicated to storing the metadata for open channels. @@ -409,7 +443,7 @@ func (d *DB) fetchOpenChannels(tx kvdb.ReadTx, // Finally, we both of the necessary buckets retrieved, fetch // all the active channels related to this node. - nodeChannels, err := d.fetchNodeChannels(chainBucket) + nodeChannels, err := db.fetchNodeChannels(chainBucket) if err != nil { return fmt.Errorf("unable to read channel for "+ "chain_hash=%x, node_key=%x: %v", @@ -426,7 +460,7 @@ func (d *DB) fetchOpenChannels(tx kvdb.ReadTx, // fetchNodeChannels retrieves all active channels from the target chainBucket // which is under a node's dedicated channel bucket. This function is typically // used to fetch all the active channels related to a particular node. 
-func (d *DB) fetchNodeChannels(chainBucket kvdb.ReadBucket) ([]*OpenChannel, error) { +func (db *DB) fetchNodeChannels(chainBucket kvdb.ReadBucket) ([]*OpenChannel, error) { var channels []*OpenChannel @@ -452,7 +486,7 @@ func (d *DB) fetchNodeChannels(chainBucket kvdb.ReadBucket) ([]*OpenChannel, err return fmt.Errorf("unable to read channel data for "+ "chan_point=%v: %v", outPoint, err) } - oChannel.Db = d + oChannel.Db = db channels = append(channels, oChannel) @@ -906,8 +940,8 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error { // pruneLinkNode determines whether we should garbage collect a link node from // the database due to no longer having any open channels with it. If there are // any left, then this acts as a no-op. -func (d *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error { - openChannels, err := d.fetchOpenChannels(tx, remotePub) +func (db *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error { + openChannels, err := db.fetchOpenChannels(tx, remotePub) if err != nil { return fmt.Errorf("unable to fetch open channels for peer %x: "+ "%v", remotePub.SerializeCompressed(), err) @@ -920,7 +954,7 @@ func (d *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error { log.Infof("Pruning link node %x with zero open channels from database", remotePub.SerializeCompressed()) - return d.deleteLinkNode(tx, remotePub) + return db.deleteLinkNode(tx, remotePub) } // PruneLinkNodes attempts to prune all link nodes found within the databse with @@ -1132,16 +1166,16 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) { // database. If the channel was already removed (has a closed channel entry), // then we'll return a nil error. Otherwise, we'll insert a new close summary // into the database. -func (d *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) error { +func (db *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) error { // With the chanPoint constructed, we'll attempt to find the target // channel in the database. If we can't find the channel, then we'll // return the error back to the caller. - dbChan, err := d.FetchChannel(*chanPoint) + dbChan, err := db.FetchChannel(*chanPoint) switch { // If the channel wasn't found, then it's possible that it was already // abandoned from the database. case err == ErrChannelNotFound: - _, closedErr := d.FetchClosedChannel(chanPoint) + _, closedErr := db.FetchClosedChannel(chanPoint) if closedErr != nil { return closedErr } @@ -1304,9 +1338,9 @@ func fetchHistoricalChanBucket(tx kvdb.ReadTx, // FetchHistoricalChannel fetches open channel data from the historical channel // bucket. 
-func (d *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) { +func (db *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) { var channel *OpenChannel - err := kvdb.View(d, func(tx kvdb.ReadTx) error { + err := kvdb.View(db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchHistoricalChanBucket(tx, outPoint) if err != nil { return err diff --git a/channeldb/db_test.go b/channeldb/db_test.go index 242b70ca4d..c97d23c02e 100644 --- a/channeldb/db_test.go +++ b/channeldb/db_test.go @@ -15,6 +15,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" @@ -33,7 +34,13 @@ func TestOpenWithCreate(t *testing.T) { // Next, open thereby creating channeldb for the first time. dbPath := filepath.Join(tempDirName, "cdb") - cdb, err := Open(dbPath) + backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb") + if err != nil { + t.Fatalf("unable to get test db backend: %v", err) + } + defer cleanup() + + cdb, err := CreateWithBackend(backend) if err != nil { t.Fatalf("unable to create channeldb: %v", err) } @@ -73,7 +80,13 @@ func TestWipe(t *testing.T) { // Next, open thereby creating channeldb for the first time. dbPath := filepath.Join(tempDirName, "cdb") - cdb, err := Open(dbPath) + backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb") + if err != nil { + t.Fatalf("unable to get test db backend: %v", err) + } + defer cleanup() + + cdb, err := CreateWithBackend(backend) if err != nil { t.Fatalf("unable to create channeldb: %v", err) } diff --git a/channeldb/kvdb/backend.go b/channeldb/kvdb/backend.go new file mode 100644 index 0000000000..5f710ed9d2 --- /dev/null +++ b/channeldb/kvdb/backend.go @@ -0,0 +1,70 @@ +package kvdb + +import ( + "fmt" + "os" + "path/filepath" + + _ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend. +) + +// fileExists returns true if the file exists, and false otherwise. +func fileExists(path string) bool { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return false + } + } + + return true +} + +// GetBoltBackend opens (or creates if doesn't exits) a bbolt +// backed database and returns a kvdb.Backend wrapping it. +func GetBoltBackend(path, name string, noFreeListSync bool) (Backend, error) { + dbFilePath := filepath.Join(path, name) + var ( + db Backend + err error + ) + + if !fileExists(dbFilePath) { + if !fileExists(path) { + if err := os.MkdirAll(path, 0700); err != nil { + return nil, err + } + } + + db, err = Create(BoltBackendName, dbFilePath, noFreeListSync) + } else { + db, err = Open(BoltBackendName, dbFilePath, noFreeListSync) + } + + if err != nil { + return nil, err + } + + return db, nil +} + +// GetTestBackend opens (or creates if doesn't exist) a bbolt or etcd +// backed database (for testing), and returns a kvdb.Backend and a cleanup +// func. Whether to create/open bbolt or embedded etcd database is based +// on the TestBackend constant which is conditionally compiled with build tag. +// The passed path is used to hold all db files, while the name is only used +// for bbolt. 
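+//
+// A typical test call site (matching the updated channeldb tests in this
+// change):
+//
+//	backend, cleanup, err := kvdb.GetTestBackend(dbPath, "cdb")
+//	if err != nil {
+//		t.Fatalf("unable to get test db backend: %v", err)
+//	}
+//	defer cleanup()
+//	cdb, err := channeldb.CreateWithBackend(backend)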
+func GetTestBackend(path, name string) (Backend, func(), error) { + empty := func() {} + + if TestBackend == BoltBackendName { + db, err := GetBoltBackend(path, name, true) + if err != nil { + return nil, nil, err + } + return db, empty, nil + } else if TestBackend == EtcdBackendName { + return GetEtcdTestBackend(path, name) + } + + return nil, nil, fmt.Errorf("unknown backend") +} diff --git a/channeldb/kvdb/bbolt.go b/channeldb/kvdb/bbolt.go deleted file mode 100644 index b249e7dbb6..0000000000 --- a/channeldb/kvdb/bbolt.go +++ /dev/null @@ -1,10 +0,0 @@ -package kvdb - -import ( - _ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend. -) - -// BoltBackendName is the name of the backend that should be passed into -// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live -// instance of bbolt. -const BoltBackendName = "bdb" diff --git a/channeldb/kvdb/config.go b/channeldb/kvdb/config.go new file mode 100644 index 0000000000..a4ed68bab9 --- /dev/null +++ b/channeldb/kvdb/config.go @@ -0,0 +1,33 @@ +package kvdb + +// BoltBackendName is the name of the backend that should be passed into +// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live +// instance of bbolt. +const BoltBackendName = "bdb" + +// EtcdBackendName is the name of the backend that should be passed into +// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live +// instance of etcd. +const EtcdBackendName = "etcd" + +// BoltConfig holds bolt configuration. +type BoltConfig struct { + NoFreeListSync bool `long:"nofreelistsync" description:"If true, prevents the database from syncing its freelist to disk"` +} + +// EtcdConfig holds etcd configuration. +type EtcdConfig struct { + Host string `long:"host" description:"Etcd database host."` + + User string `long:"user" description:"Etcd database user."` + + Pass string `long:"pass" description:"Password for the database user."` + + CertFile string `long:"cert_file" description:"Path to the TLS certificate for etcd RPC."` + + KeyFile string `long:"key_file" description:"Path to the TLS private key for etcd RPC."` + + InsecureSkipVerify bool `long:"insecure_skip_verify" description:"Whether we intend to skip TLS verification"` + + CollectStats bool `long:"collect_stats" description:"Whether to collect etcd commit stats."` +} diff --git a/channeldb/kvdb/etcd/bucket.go b/channeldb/kvdb/etcd/bucket.go new file mode 100644 index 0000000000..3bc087dbf3 --- /dev/null +++ b/channeldb/kvdb/etcd/bucket.go @@ -0,0 +1,79 @@ +// +build kvdb_etcd + +package etcd + +import ( + "crypto/sha256" +) + +const ( + bucketIDLength = 32 +) + +var ( + bucketPrefix = []byte("b") + valuePrefix = []byte("v") + sequencePrefix = []byte("$") +) + +// makeBucketID returns a deterministic key for the passed byte slice. +// Currently it returns the sha256 hash of the slice. +func makeBucketID(key []byte) [bucketIDLength]byte { + return sha256.Sum256(key) +} + +// isValidBucketID checks if the passed slice is the required length to be a +// valid bucket id. +func isValidBucketID(s []byte) bool { + return len(s) == bucketIDLength +} + +// makeKey concatenates prefix, parent and key into one byte slice. +// The prefix indicates the use of this key (whether bucket, value or sequence), +// while parentID refers to the parent bucket. 
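+//
+// For example, with bucket id P, the value stored under key K lives at
+// "v" || P || K and a nested bucket named N at "b" || P || N (see the prefix
+// constants above; this only restates the scheme, it adds no new behavior).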
+func makeKey(prefix, parent, key []byte) []byte { + keyBuf := make([]byte, len(prefix)+len(parent)+len(key)) + copy(keyBuf, prefix) + copy(keyBuf[len(prefix):], parent) + copy(keyBuf[len(prefix)+len(parent):], key) + + return keyBuf +} + +// makePrefix concatenates prefix with parent into one byte slice. +func makePrefix(prefix []byte, parent []byte) []byte { + prefixBuf := make([]byte, len(prefix)+len(parent)) + copy(prefixBuf, prefix) + copy(prefixBuf[len(prefix):], parent) + + return prefixBuf +} + +// makeBucketKey returns a bucket key from the passed parent bucket id and +// the key. +func makeBucketKey(parent []byte, key []byte) []byte { + return makeKey(bucketPrefix, parent, key) +} + +// makeValueKey returns a value key from the passed parent bucket id and +// the key. +func makeValueKey(parent []byte, key []byte) []byte { + return makeKey(valuePrefix, parent, key) +} + +// makeSequenceKey returns a sequence key of the passed parent bucket id. +func makeSequenceKey(parent []byte) []byte { + return makeKey(sequencePrefix, parent, nil) +} + +// makeBucketPrefix returns the bucket prefix of the passed parent bucket id. +// This prefix is used for all sub buckets. +func makeBucketPrefix(parent []byte) []byte { + return makePrefix(bucketPrefix, parent) +} + +// makeValuePrefix returns the value prefix of the passed parent bucket id. +// This prefix is used for all key/values in the bucket. +func makeValuePrefix(parent []byte) []byte { + return makePrefix(valuePrefix, parent) +} diff --git a/channeldb/kvdb/etcd/bucket_test.go b/channeldb/kvdb/etcd/bucket_test.go new file mode 100644 index 0000000000..e68821f1e0 --- /dev/null +++ b/channeldb/kvdb/etcd/bucket_test.go @@ -0,0 +1,42 @@ +// +build kvdb_etcd + +package etcd + +// bkey is a helper functon used in tests to create a bucket key from passed +// bucket list. +func bkey(buckets ...string) string { + var bucketKey []byte + + rootID := makeBucketID([]byte("")) + parent := rootID[:] + + for _, bucketName := range buckets { + bucketKey = makeBucketKey(parent, []byte(bucketName)) + id := makeBucketID(bucketKey) + parent = id[:] + } + + return string(bucketKey) +} + +// bval is a helper function used in tests to create a bucket value (the value +// for a bucket key) from the passed bucket list. +func bval(buckets ...string) string { + id := makeBucketID([]byte(bkey(buckets...))) + return string(id[:]) +} + +// vkey is a helper function used in tests to create a value key from the +// passed key and bucket list. +func vkey(key string, buckets ...string) string { + rootID := makeBucketID([]byte("")) + bucket := rootID[:] + + for _, bucketName := range buckets { + bucketKey := makeBucketKey(bucket, []byte(bucketName)) + id := makeBucketID(bucketKey) + bucket = id[:] + } + + return string(makeValueKey(bucket, []byte(key))) +} diff --git a/channeldb/kvdb/etcd/db.go b/channeldb/kvdb/etcd/db.go new file mode 100644 index 0000000000..a082e61098 --- /dev/null +++ b/channeldb/kvdb/etcd/db.go @@ -0,0 +1,291 @@ +// +build kvdb_etcd + +package etcd + +import ( + "context" + "fmt" + "io" + "runtime" + "sync" + "time" + + "github.com/btcsuite/btcwallet/walletdb" + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/pkg/transport" +) + +const ( + // etcdConnectionTimeout is the timeout until successful connection to the + // etcd instance. + etcdConnectionTimeout = 10 * time.Second + + // etcdLongTimeout is a timeout for longer taking etcd operatons. 
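+	// Within this file it is currently only used to bound the Copy
+	// (snapshot) call.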
+ etcdLongTimeout = 30 * time.Second +) + +// callerStats holds commit stats for a specific caller. Currently it only +// holds the max stat, meaning that for a particular caller the largest +// commit set is recorded. +type callerStats struct { + count int + commitStats CommitStats +} + +func (s callerStats) String() string { + return fmt.Sprintf("count: %d, retries: %d, rset: %d, wset: %d", + s.count, s.commitStats.Retries, s.commitStats.Rset, s.commitStats.Wset) +} + +// commitStatsCollector collects commit stats for commits succeeding +// and also for commits failing. +type commitStatsCollector struct { + sync.RWMutex + succ map[string]*callerStats + fail map[string]*callerStats +} + +// newCommitStatsColletor creates a new commitStatsCollector instance. +func newCommitStatsColletor() *commitStatsCollector { + return &commitStatsCollector{ + succ: make(map[string]*callerStats), + fail: make(map[string]*callerStats), + } +} + +// PrintStats returns collected stats pretty printed into a string. +func (c *commitStatsCollector) PrintStats() string { + c.RLock() + defer c.RUnlock() + + s := "\nFailure:\n" + for k, v := range c.fail { + s += fmt.Sprintf("%s\t%s\n", k, v) + } + + s += "\nSuccess:\n" + for k, v := range c.succ { + s += fmt.Sprintf("%s\t%s\n", k, v) + } + + return s +} + +// updateStatsMap updatess commit stats map for a caller. +func updateStatMap( + caller string, stats CommitStats, m map[string]*callerStats) { + + if _, ok := m[caller]; !ok { + m[caller] = &callerStats{} + } + + curr := m[caller] + curr.count++ + + // Update only if the total commit set is greater or equal. + currTotal := curr.commitStats.Rset + curr.commitStats.Wset + if currTotal <= (stats.Rset + stats.Wset) { + curr.commitStats = stats + } +} + +// callback is an STM commit stats callback passed which can be passed +// using a WithCommitStatsCallback to the STM upon construction. +func (c *commitStatsCollector) callback(succ bool, stats CommitStats) { + caller := "unknown" + + // Get the caller. As this callback is called from + // the backend interface that means we need to ascend + // 4 frames in the callstack. + _, file, no, ok := runtime.Caller(4) + if ok { + caller = fmt.Sprintf("%s#%d", file, no) + } + + c.Lock() + defer c.Unlock() + + if succ { + updateStatMap(caller, stats, c.succ) + } else { + updateStatMap(caller, stats, c.fail) + } +} + +// db holds a reference to the etcd client connection. +type db struct { + config BackendConfig + cli *clientv3.Client + commitStatsCollector *commitStatsCollector +} + +// Enforce db implements the walletdb.DB interface. +var _ walletdb.DB = (*db)(nil) + +// BackendConfig holds and etcd backend config and connection parameters. +type BackendConfig struct { + // Host holds the peer url of the etcd instance. + Host string + + // User is the username for the etcd peer. + User string + + // Pass is the password for the etcd peer. + Pass string + + // CertFile holds the path to the TLS certificate for etcd RPC. + CertFile string + + // KeyFile holds the path to the TLS private key for etcd RPC. + KeyFile string + + // InsecureSkipVerify should be set to true if we intend to + // skip TLS verification. + InsecureSkipVerify bool + + // Prefix the hash of the prefix will be used as the root + // bucket id. This enables key space separation similar to + // name spaces. + Prefix string + + // CollectCommitStats indicates wheter to commit commit stats. + CollectCommitStats bool +} + +// newEtcdBackend returns a db object initialized with the passed backend +// config. 
If etcd connection cannot be estabished, then returns error. +func newEtcdBackend(config BackendConfig) (*db, error) { + tlsInfo := transport.TLSInfo{ + CertFile: config.CertFile, + KeyFile: config.KeyFile, + InsecureSkipVerify: config.InsecureSkipVerify, + } + + tlsConfig, err := tlsInfo.ClientConfig() + if err != nil { + return nil, err + } + + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{config.Host}, + DialTimeout: etcdConnectionTimeout, + Username: config.User, + Password: config.Pass, + TLS: tlsConfig, + }) + + if err != nil { + return nil, err + } + + backend := &db{ + cli: cli, + config: config, + } + + if config.CollectCommitStats { + backend.commitStatsCollector = newCommitStatsColletor() + } + + return backend, nil +} + +// getSTMOptions creats all STM options based on the backend config. +func (db *db) getSTMOptions() []STMOptionFunc { + opts := []STMOptionFunc{} + if db.config.CollectCommitStats { + opts = append(opts, + WithCommitStatsCallback(db.commitStatsCollector.callback), + ) + } + + return opts +} + +// View opens a database read transaction and executes the function f with the +// transaction passed as a parameter. After f exits, the transaction is rolled +// back. If f errors, its error is returned, not a rollback error (if any +// occur). +func (db *db) View(f func(tx walletdb.ReadTx) error) error { + apply := func(stm STM) error { + return f(newReadWriteTx(stm, db.config.Prefix)) + } + + return RunSTM(db.cli, apply, db.getSTMOptions()...) +} + +// Update opens a database read/write transaction and executes the function f +// with the transaction passed as a parameter. After f exits, if f did not +// error, the transaction is committed. Otherwise, if f did error, the +// transaction is rolled back. If the rollback fails, the original error +// returned by f is still returned. If the commit fails, the commit error is +// returned. +func (db *db) Update(f func(tx walletdb.ReadWriteTx) error) error { + apply := func(stm STM) error { + return f(newReadWriteTx(stm, db.config.Prefix)) + } + + return RunSTM(db.cli, apply, db.getSTMOptions()...) +} + +// PrintStats returns all collected stats pretty printed into a string. +func (db *db) PrintStats() string { + if db.commitStatsCollector != nil { + return db.commitStatsCollector.PrintStats() + } + + return "" +} + +// BeginReadTx opens a database read transaction. +func (db *db) BeginReadWriteTx() (walletdb.ReadWriteTx, error) { + return newReadWriteTx( + NewSTM(db.cli, db.getSTMOptions()...), + db.config.Prefix, + ), nil +} + +// BeginReadWriteTx opens a database read+write transaction. +func (db *db) BeginReadTx() (walletdb.ReadTx, error) { + return newReadWriteTx( + NewSTM(db.cli, db.getSTMOptions()...), + db.config.Prefix, + ), nil +} + +// Copy writes a copy of the database to the provided writer. This call will +// start a read-only transaction to perform all operations. +// This function is part of the walletdb.Db interface implementation. +func (db *db) Copy(w io.Writer) error { + ctx := context.Background() + + ctx, cancel := context.WithTimeout(ctx, etcdLongTimeout) + defer cancel() + + readCloser, err := db.cli.Snapshot(ctx) + if err != nil { + return err + } + + _, err = io.Copy(w, readCloser) + + return err +} + +// Close cleanly shuts down the database and syncs all data. +// This function is part of the walletdb.Db interface implementation. 
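+//
+// For the etcd backend this amounts to closing the underlying client
+// connection; there is no local file state to flush.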
+func (db *db) Close() error { + return db.cli.Close() +} + +// Batch opens a database read/write transaction and executes the function f +// with the transaction passed as a parameter. After f exits, if f did not +// error, the transaction is committed. Otherwise, if f did error, the +// transaction is rolled back. If the rollback fails, the original error +// returned by f is still returned. If the commit fails, the commit error is +// returned. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *db) Batch(apply func(tx walletdb.ReadWriteTx) error) error { + return db.Update(apply) +} diff --git a/channeldb/kvdb/etcd/db_test.go b/channeldb/kvdb/etcd/db_test.go new file mode 100644 index 0000000000..69342207ad --- /dev/null +++ b/channeldb/kvdb/etcd/db_test.go @@ -0,0 +1,44 @@ +// +build kvdb_etcd + +package etcd + +import ( + "bytes" + "testing" + + "github.com/btcsuite/btcwallet/walletdb" + "github.com/stretchr/testify/assert" +) + +func TestCopy(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + // "apple" + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.NoError(t, err) + assert.NotNil(t, apple) + + assert.NoError(t, apple.Put([]byte("key"), []byte("val"))) + return nil + }) + + // Expect non-zero copy. + var buf bytes.Buffer + + assert.NoError(t, db.Copy(&buf)) + assert.Greater(t, buf.Len(), 0) + assert.Nil(t, err) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + vkey("key", "apple"): "val", + } + assert.Equal(t, expected, f.Dump()) +} diff --git a/channeldb/kvdb/etcd/driver.go b/channeldb/kvdb/etcd/driver.go new file mode 100644 index 0000000000..8e313fc613 --- /dev/null +++ b/channeldb/kvdb/etcd/driver.go @@ -0,0 +1,68 @@ +// +build kvdb_etcd + +package etcd + +import ( + "fmt" + + "github.com/btcsuite/btcwallet/walletdb" +) + +const ( + dbType = "etcd" +) + +// parseArgs parses the arguments from the walletdb Open/Create methods. +func parseArgs(funcName string, args ...interface{}) (*BackendConfig, error) { + if len(args) != 1 { + return nil, fmt.Errorf("invalid number of arguments to %s.%s -- "+ + "expected: etcd.BackendConfig", + dbType, funcName, + ) + } + + config, ok := args[0].(BackendConfig) + if !ok { + return nil, fmt.Errorf("argument to %s.%s is invalid -- "+ + "expected: etcd.BackendConfig", + dbType, funcName, + ) + } + + return &config, nil +} + +// createDBDriver is the callback provided during driver registration that +// creates, initializes, and opens a database for use. +func createDBDriver(args ...interface{}) (walletdb.DB, error) { + config, err := parseArgs("Create", args...) + if err != nil { + return nil, err + } + + return newEtcdBackend(*config) +} + +// openDBDriver is the callback provided during driver registration that opens +// an existing database for use. +func openDBDriver(args ...interface{}) (walletdb.DB, error) { + config, err := parseArgs("Open", args...) + if err != nil { + return nil, err + } + + return newEtcdBackend(*config) +} + +func init() { + // Register the driver. 
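+	// Once registered, callers outside this package can open the backend
+	// through the generic walletdb entry point, e.g. (illustrative):
+	//
+	//	db, err := walletdb.Open("etcd", etcd.BackendConfig{Host: host})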
+ driver := walletdb.Driver{ + DbType: dbType, + Create: createDBDriver, + Open: openDBDriver, + } + if err := walletdb.RegisterDriver(driver); err != nil { + panic(fmt.Sprintf("Failed to regiser database driver '%s': %v", + dbType, err)) + } +} diff --git a/channeldb/kvdb/etcd/driver_test.go b/channeldb/kvdb/etcd/driver_test.go new file mode 100644 index 0000000000..365eda7a09 --- /dev/null +++ b/channeldb/kvdb/etcd/driver_test.go @@ -0,0 +1,30 @@ +// +build kvdb_etcd + +package etcd + +import ( + "testing" + + "github.com/btcsuite/btcwallet/walletdb" + "github.com/stretchr/testify/assert" +) + +func TestOpenCreateFailure(t *testing.T) { + t.Parallel() + + db, err := walletdb.Open(dbType) + assert.Error(t, err) + assert.Nil(t, db) + + db, err = walletdb.Open(dbType, "wrong") + assert.Error(t, err) + assert.Nil(t, db) + + db, err = walletdb.Create(dbType) + assert.Error(t, err) + assert.Nil(t, db) + + db, err = walletdb.Create(dbType, "wrong") + assert.Error(t, err) + assert.Nil(t, db) +} diff --git a/channeldb/kvdb/etcd/embed.go b/channeldb/kvdb/etcd/embed.go new file mode 100644 index 0000000000..f19363f35a --- /dev/null +++ b/channeldb/kvdb/etcd/embed.go @@ -0,0 +1,75 @@ +// +build kvdb_etcd + +package etcd + +import ( + "fmt" + "net" + "net/url" + "time" + + "github.com/coreos/etcd/embed" +) + +const ( + // readyTimeout is the time until the embedded etcd instance should start. + readyTimeout = 10 * time.Second +) + +// getFreePort returns a random open TCP port. +func getFreePort() int { + ln, err := net.Listen("tcp", "[::]:0") + if err != nil { + panic(err) + } + + port := ln.Addr().(*net.TCPAddr).Port + + err = ln.Close() + if err != nil { + panic(err) + } + + return port +} + +// NewEmbeddedEtcdInstance creates an embedded etcd instance for testing, +// listening on random open ports. Returns the backend config and a cleanup +// func that will stop the etcd instance. +func NewEmbeddedEtcdInstance(path string) (*BackendConfig, func(), error) { + cfg := embed.NewConfig() + cfg.Dir = path + + // To ensure that we can submit large transactions. + cfg.MaxTxnOps = 1000 + + // Listen on random free ports. + clientURL := fmt.Sprintf("127.0.0.1:%d", getFreePort()) + peerURL := fmt.Sprintf("127.0.0.1:%d", getFreePort()) + cfg.LCUrls = []url.URL{{Host: clientURL}} + cfg.LPUrls = []url.URL{{Host: peerURL}} + + etcd, err := embed.StartEtcd(cfg) + if err != nil { + return nil, nil, err + } + + select { + case <-etcd.Server.ReadyNotify(): + case <-time.After(readyTimeout): + etcd.Close() + return nil, nil, + fmt.Errorf("etcd failed to start after: %v", readyTimeout) + } + + connConfig := &BackendConfig{ + Host: "http://" + peerURL, + User: "user", + Pass: "pass", + InsecureSkipVerify: true, + } + + return connConfig, func() { + etcd.Close() + }, nil +} diff --git a/channeldb/kvdb/etcd/fixture_test.go b/channeldb/kvdb/etcd/fixture_test.go new file mode 100644 index 0000000000..5652690686 --- /dev/null +++ b/channeldb/kvdb/etcd/fixture_test.go @@ -0,0 +1,129 @@ +// +build kvdb_etcd + +package etcd + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/coreos/etcd/clientv3" +) + +const ( + // testEtcdTimeout is used for all RPC calls initiated by the test fixture. + testEtcdTimeout = 5 * time.Second +) + +// EtcdTestFixture holds internal state of the etcd test fixture. 
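+// It wraps an embedded etcd instance together with a connected client and the
+// matching BackendConfig, so tests can seed and inspect raw key/values around
+// the code under test (see Put, Get and Dump below).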
+type EtcdTestFixture struct { + t *testing.T + cli *clientv3.Client + config *BackendConfig + cleanup func() +} + +// NewTestEtcdInstance creates an embedded etcd instance for testing, listening +// on random open ports. Returns the connection config and a cleanup func that +// will stop the etcd instance. +func NewTestEtcdInstance(t *testing.T, path string) (*BackendConfig, func()) { + t.Helper() + + config, cleanup, err := NewEmbeddedEtcdInstance(path) + if err != nil { + t.Fatalf("error while staring embedded etcd instance: %v", err) + } + + return config, cleanup +} + +// NewTestEtcdTestFixture creates a new etcd-test fixture. This is helper +// object to facilitate etcd tests and ensure pre and post conditions. +func NewEtcdTestFixture(t *testing.T) *EtcdTestFixture { + tmpDir, err := ioutil.TempDir("", "etcd") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + + config, etcdCleanup := NewTestEtcdInstance(t, tmpDir) + + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{config.Host}, + Username: config.User, + Password: config.Pass, + }) + if err != nil { + os.RemoveAll(tmpDir) + t.Fatalf("unable to create etcd test fixture: %v", err) + } + + return &EtcdTestFixture{ + t: t, + cli: cli, + config: config, + cleanup: func() { + etcdCleanup() + os.RemoveAll(tmpDir) + }, + } +} + +// Put puts a string key/value into the test etcd database. +func (f *EtcdTestFixture) Put(key, value string) { + ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout) + defer cancel() + + _, err := f.cli.Put(ctx, key, value) + if err != nil { + f.t.Fatalf("etcd test fixture failed to put: %v", err) + } +} + +// Get queries a key and returns the stored value from the test etcd database. +func (f *EtcdTestFixture) Get(key string) string { + ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout) + defer cancel() + + resp, err := f.cli.Get(ctx, key) + if err != nil { + f.t.Fatalf("etcd test fixture failed to put: %v", err) + } + + if len(resp.Kvs) > 0 { + return string(resp.Kvs[0].Value) + } + + return "" +} + +// Dump scans and returns all key/values from the test etcd database. +func (f *EtcdTestFixture) Dump() map[string]string { + ctx, cancel := context.WithTimeout(context.TODO(), testEtcdTimeout) + defer cancel() + + resp, err := f.cli.Get(ctx, "", clientv3.WithPrefix()) + if err != nil { + f.t.Fatalf("etcd test fixture failed to put: %v", err) + } + + result := make(map[string]string) + for _, kv := range resp.Kvs { + result[string(kv.Key)] = string(kv.Value) + } + + return result +} + +// BackendConfig returns the backend config for connecting to theembedded +// etcd instance. +func (f *EtcdTestFixture) BackendConfig() BackendConfig { + return *f.config +} + +// Cleanup should be called at test fixture teardown to stop the embedded +// etcd instance and remove all temp db files form the filesystem. +func (f *EtcdTestFixture) Cleanup() { + f.cleanup() +} diff --git a/channeldb/kvdb/etcd/readwrite_bucket.go b/channeldb/kvdb/etcd/readwrite_bucket.go new file mode 100644 index 0000000000..e60d2cec33 --- /dev/null +++ b/channeldb/kvdb/etcd/readwrite_bucket.go @@ -0,0 +1,364 @@ +// +build kvdb_etcd + +package etcd + +import ( + "bytes" + "strconv" + + "github.com/btcsuite/btcwallet/walletdb" +) + +// readWriteBucket stores the bucket id and the buckets transaction. +type readWriteBucket struct { + // id is used to identify the bucket and is created by + // hashing the parent id with the bucket key. 
For each key/value, + // sub-bucket or the bucket sequence the bucket id is used with the + // appropriate prefix to prefix the key. + id []byte + + // tx holds the parent transaction. + tx *readWriteTx +} + +// newReadWriteBucket creates a new rw bucket with the passed transaction +// and bucket id. +func newReadWriteBucket(tx *readWriteTx, key, id []byte) *readWriteBucket { + if !bytes.Equal(id, tx.rootBucketID[:]) { + // Add the bucket key/value to the lock set. + tx.lock(string(key), string(id)) + } + + return &readWriteBucket{ + id: id, + tx: tx, + } +} + +// NestedReadBucket retrieves a nested read bucket with the given key. +// Returns nil if the bucket does not exist. +func (b *readWriteBucket) NestedReadBucket(key []byte) walletdb.ReadBucket { + return b.NestedReadWriteBucket(key) +} + +// ForEach invokes the passed function with every key/value pair in +// the bucket. This includes nested buckets, in which case the value +// is nil, but it does not include the key/value pairs within those +// nested buckets. +func (b *readWriteBucket) ForEach(cb func(k, v []byte) error) error { + prefix := makeValuePrefix(b.id) + prefixLen := len(prefix) + + // Get the first matching key that is in the bucket. + kv, err := b.tx.stm.First(string(prefix)) + if err != nil { + return err + } + + for kv != nil { + if err := cb([]byte(kv.key[prefixLen:]), []byte(kv.val)); err != nil { + return err + } + + // Step to the next key. + kv, err = b.tx.stm.Next(string(prefix), kv.key) + if err != nil { + return err + } + } + + // Make a bucket prefix. This prefixes all sub buckets. + prefix = makeBucketPrefix(b.id) + prefixLen = len(prefix) + + // Get the first bucket. + kv, err = b.tx.stm.First(string(prefix)) + if err != nil { + return err + } + + for kv != nil { + if err := cb([]byte(kv.key[prefixLen:]), nil); err != nil { + return err + } + + // Step to the next bucket. + kv, err = b.tx.stm.Next(string(prefix), kv.key) + if err != nil { + return err + } + } + + return nil +} + +// Get returns the value for the given key. Returns nil if the key does +// not exist in this bucket. +func (b *readWriteBucket) Get(key []byte) []byte { + // Return nil if the key is empty. + if len(key) == 0 { + return nil + } + + // Fetch the associated value. + val, err := b.tx.stm.Get(string(makeValueKey(b.id, key))) + if err != nil { + // TODO: we should return the error once the + // kvdb inteface is extended. + return nil + } + + if val == nil { + return nil + } + + return val +} + +func (b *readWriteBucket) ReadCursor() walletdb.ReadCursor { + return newReadWriteCursor(b) +} + +// NestedReadWriteBucket retrieves a nested bucket with the given key. +// Returns nil if the bucket does not exist. +func (b *readWriteBucket) NestedReadWriteBucket(key []byte) walletdb.ReadWriteBucket { + if len(key) == 0 { + return nil + } + + // Get the bucket id (and return nil if bucket doesn't exist). + bucketKey := makeBucketKey(b.id, key) + bucketVal, err := b.tx.stm.Get(string(bucketKey)) + if err != nil { + // TODO: we should return the error once the + // kvdb inteface is extended. + return nil + } + + if !isValidBucketID(bucketVal) { + return nil + } + + // Return the bucket with the fetched bucket id. + return newReadWriteBucket(b.tx, bucketKey, bucketVal) +} + +// CreateBucket creates and returns a new nested bucket with the given +// key. 
Returns ErrBucketExists if the bucket already exists, +// ErrBucketNameRequired if the key is empty, or ErrIncompatibleValue +// if the key value is otherwise invalid for the particular database +// implementation. Other errors are possible depending on the +// implementation. +func (b *readWriteBucket) CreateBucket(key []byte) ( + walletdb.ReadWriteBucket, error) { + + if len(key) == 0 { + return nil, walletdb.ErrBucketNameRequired + } + + // Check if the bucket already exists. + bucketKey := makeBucketKey(b.id, key) + + bucketVal, err := b.tx.stm.Get(string(bucketKey)) + if err != nil { + return nil, err + } + + if isValidBucketID(bucketVal) { + return nil, walletdb.ErrBucketExists + } + + // Create a deterministic bucket id from the bucket key. + newID := makeBucketID(bucketKey) + + // Create the bucket. + b.tx.put(string(bucketKey), string(newID[:])) + + return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil +} + +// CreateBucketIfNotExists creates and returns a new nested bucket with +// the given key if it does not already exist. Returns +// ErrBucketNameRequired if the key is empty or ErrIncompatibleValue +// if the key value is otherwise invalid for the particular database +// backend. Other errors are possible depending on the implementation. +func (b *readWriteBucket) CreateBucketIfNotExists(key []byte) ( + walletdb.ReadWriteBucket, error) { + + if len(key) == 0 { + return nil, walletdb.ErrBucketNameRequired + } + + // Check for the bucket and create if it doesn't exist. + bucketKey := makeBucketKey(b.id, key) + + bucketVal, err := b.tx.stm.Get(string(bucketKey)) + if err != nil { + return nil, err + } + + if !isValidBucketID(bucketVal) { + newID := makeBucketID(bucketKey) + b.tx.put(string(bucketKey), string(newID[:])) + + return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil + } + + // Otherwise return the bucket with the fetched bucket id. + return newReadWriteBucket(b.tx, bucketKey, bucketVal), nil +} + +// DeleteNestedBucket deletes the nested bucket and its sub-buckets +// pointed to by the passed key. All values in the bucket and sub-buckets +// will be deleted as well. +func (b *readWriteBucket) DeleteNestedBucket(key []byte) error { + // TODO shouldn't empty key return ErrBucketNameRequired ? + if len(key) == 0 { + return walletdb.ErrIncompatibleValue + } + + // Get the bucket first. + bucketKey := string(makeBucketKey(b.id, key)) + + bucketVal, err := b.tx.stm.Get(bucketKey) + if err != nil { + return err + } + + if !isValidBucketID(bucketVal) { + return walletdb.ErrBucketNotFound + } + + // Enqueue the top level bucket id. + queue := [][]byte{bucketVal} + + // Traverse the buckets breadth first. + for len(queue) != 0 { + if !isValidBucketID(queue[0]) { + return walletdb.ErrBucketNotFound + } + + id := queue[0] + queue = queue[1:] + + // Delete values in the current bucket + valuePrefix := string(makeValuePrefix(id)) + + kv, err := b.tx.stm.First(valuePrefix) + if err != nil { + return err + } + + for kv != nil { + b.tx.del(kv.key) + + kv, err = b.tx.stm.Next(valuePrefix, kv.key) + if err != nil { + return err + } + } + + // Iterate sub buckets + bucketPrefix := string(makeBucketPrefix(id)) + + kv, err = b.tx.stm.First(bucketPrefix) + if err != nil { + return err + } + + for kv != nil { + // Delete sub bucket key. + b.tx.del(kv.key) + // Queue it for traversal. + queue = append(queue, []byte(kv.val)) + + kv, err = b.tx.stm.Next(bucketPrefix, kv.key) + if err != nil { + return err + } + } + } + + // Delete the top level bucket. 
+ b.tx.del(bucketKey) + + return nil +} + +// Put updates the value for the passed key. +// Returns ErrKeyRequred if te passed key is empty. +func (b *readWriteBucket) Put(key, value []byte) error { + if len(key) == 0 { + return walletdb.ErrKeyRequired + } + + // Update the transaction with the new value. + b.tx.put(string(makeValueKey(b.id, key)), string(value)) + + return nil +} + +// Delete deletes the key/value pointed to by the passed key. +// Returns ErrKeyRequred if the passed key is empty. +func (b *readWriteBucket) Delete(key []byte) error { + if len(key) == 0 { + return walletdb.ErrKeyRequired + } + + // Update the transaction to delete the key/value. + b.tx.del(string(makeValueKey(b.id, key))) + + return nil +} + +// ReadWriteCursor returns a new read-write cursor for this bucket. +func (b *readWriteBucket) ReadWriteCursor() walletdb.ReadWriteCursor { + return newReadWriteCursor(b) +} + +// Tx returns the buckets transaction. +func (b *readWriteBucket) Tx() walletdb.ReadWriteTx { + return b.tx +} + +// NextSequence returns an autoincrementing sequence number for this bucket. +// Note that this is not a thread safe function and as such it must not be used +// for synchronization. +func (b *readWriteBucket) NextSequence() (uint64, error) { + seq := b.Sequence() + 1 + + return seq, b.SetSequence(seq) +} + +// SetSequence updates the sequence number for the bucket. +func (b *readWriteBucket) SetSequence(v uint64) error { + // Convert the number to string. + val := strconv.FormatUint(v, 10) + + // Update the transaction with the new value for the sequence key. + b.tx.put(string(makeSequenceKey(b.id)), val) + + return nil +} + +// Sequence returns the current sequence number for this bucket without +// incrementing it. +func (b *readWriteBucket) Sequence() uint64 { + val, err := b.tx.stm.Get(string(makeSequenceKey(b.id))) + if err != nil { + // TODO: This update kvdb interface such that error + // may be returned here. + return 0 + } + + if val == nil { + // If the sequence number is not yet + // stored, then take the default value. + return 0 + } + + // Otherwise try to parse a 64 bit unsigned integer from the value. + num, _ := strconv.ParseUint(string(val), 10, 64) + + return num +} diff --git a/channeldb/kvdb/etcd/readwrite_bucket_test.go b/channeldb/kvdb/etcd/readwrite_bucket_test.go new file mode 100644 index 0000000000..a3a5d62081 --- /dev/null +++ b/channeldb/kvdb/etcd/readwrite_bucket_test.go @@ -0,0 +1,404 @@ +// +build kvdb_etcd + +package etcd + +import ( + "fmt" + "math" + "testing" + + "github.com/btcsuite/btcwallet/walletdb" + "github.com/stretchr/testify/assert" +) + +func TestBucketCreation(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + // empty bucket name + b, err := tx.CreateTopLevelBucket(nil) + assert.Error(t, walletdb.ErrBucketNameRequired, err) + assert.Nil(t, b) + + // empty bucket name + b, err = tx.CreateTopLevelBucket([]byte("")) + assert.Error(t, walletdb.ErrBucketNameRequired, err) + assert.Nil(t, b) + + // "apple" + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.NoError(t, err) + assert.NotNil(t, apple) + + // Check bucket tx. 
+ assert.Equal(t, tx, apple.Tx()) + + // "apple" already created + b, err = tx.CreateTopLevelBucket([]byte("apple")) + assert.NoError(t, err) + assert.NotNil(t, b) + + // "apple/banana" + banana, err := apple.CreateBucket([]byte("banana")) + assert.NoError(t, err) + assert.NotNil(t, banana) + + banana, err = apple.CreateBucketIfNotExists([]byte("banana")) + assert.NoError(t, err) + assert.NotNil(t, banana) + + // Try creating "apple/banana" again + b, err = apple.CreateBucket([]byte("banana")) + assert.Error(t, walletdb.ErrBucketExists, err) + assert.Nil(t, b) + + // "apple/mango" + mango, err := apple.CreateBucket([]byte("mango")) + assert.Nil(t, err) + assert.NotNil(t, mango) + + // "apple/banana/pear" + pear, err := banana.CreateBucket([]byte("pear")) + assert.Nil(t, err) + assert.NotNil(t, pear) + + // empty bucket + assert.Nil(t, apple.NestedReadWriteBucket(nil)) + assert.Nil(t, apple.NestedReadWriteBucket([]byte(""))) + + // "apple/pear" doesn't exist + assert.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) + + // "apple/banana" exits + assert.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) + assert.NotNil(t, apple.NestedReadBucket([]byte("banana"))) + return nil + }) + + assert.Nil(t, err) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + bkey("apple", "banana"): bval("apple", "banana"), + bkey("apple", "mango"): bval("apple", "mango"), + bkey("apple", "banana", "pear"): bval("apple", "banana", "pear"), + } + assert.Equal(t, expected, f.Dump()) +} + +func TestBucketDeletion(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + // "apple" + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.Nil(t, err) + assert.NotNil(t, apple) + + // "apple/banana" + banana, err := apple.CreateBucket([]byte("banana")) + assert.Nil(t, err) + assert.NotNil(t, banana) + + kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} + + for _, kv := range kvs { + assert.NoError(t, banana.Put([]byte(kv.key), []byte(kv.val))) + assert.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) + } + + // Delete a k/v from "apple/banana" + assert.NoError(t, banana.Delete([]byte("key2"))) + // Try getting/putting/deleting invalid k/v's. + assert.Nil(t, banana.Get(nil)) + assert.Error(t, walletdb.ErrKeyRequired, banana.Put(nil, []byte("val"))) + assert.Error(t, walletdb.ErrKeyRequired, banana.Delete(nil)) + + // Try deleting a k/v that doesn't exist. + assert.NoError(t, banana.Delete([]byte("nokey"))) + + // "apple/pear" + pear, err := apple.CreateBucket([]byte("pear")) + assert.Nil(t, err) + assert.NotNil(t, pear) + + // Put some values into "apple/pear" + for _, kv := range kvs { + assert.Nil(t, pear.Put([]byte(kv.key), []byte(kv.val))) + assert.Equal(t, []byte(kv.val), pear.Get([]byte(kv.key))) + } + + // Create nested bucket "apple/pear/cherry" + cherry, err := pear.CreateBucket([]byte("cherry")) + assert.Nil(t, err) + assert.NotNil(t, cherry) + + // Put some values into "apple/pear/cherry" + for _, kv := range kvs { + assert.NoError(t, cherry.Put([]byte(kv.key), []byte(kv.val))) + } + + // Read back values in "apple/pear/cherry" trough a read bucket. + cherryReadBucket := pear.NestedReadBucket([]byte("cherry")) + for _, kv := range kvs { + assert.Equal( + t, []byte(kv.val), + cherryReadBucket.Get([]byte(kv.key)), + ) + } + + // Try deleting some invalid buckets. 
+ assert.Error(t, + walletdb.ErrBucketNameRequired, apple.DeleteNestedBucket(nil), + ) + + // Try deleting a non existing bucket. + assert.Error( + t, + walletdb.ErrBucketNotFound, + apple.DeleteNestedBucket([]byte("missing")), + ) + + // Delete "apple/pear" + assert.Nil(t, apple.DeleteNestedBucket([]byte("pear"))) + + // "apple/pear" deleted + assert.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) + + // "apple/pear/cherry" deleted + assert.Nil(t, pear.NestedReadWriteBucket([]byte("cherry"))) + + // Values deleted too. + for _, kv := range kvs { + assert.Nil(t, pear.Get([]byte(kv.key))) + assert.Nil(t, cherry.Get([]byte(kv.key))) + } + + // "aple/banana" exists + assert.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) + return nil + }) + + assert.Nil(t, err) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + bkey("apple", "banana"): bval("apple", "banana"), + vkey("key1", "apple", "banana"): "val1", + vkey("key3", "apple", "banana"): "val3", + } + assert.Equal(t, expected, f.Dump()) +} + +func TestBucketForEach(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + // "apple" + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.Nil(t, err) + assert.NotNil(t, apple) + + // "apple/banana" + banana, err := apple.CreateBucket([]byte("banana")) + assert.Nil(t, err) + assert.NotNil(t, banana) + + kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} + + // put some values into "apple" and "apple/banana" too + for _, kv := range kvs { + assert.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) + assert.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) + + assert.Nil(t, banana.Put([]byte(kv.key), []byte(kv.val))) + assert.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) + } + + got := make(map[string]string) + err = apple.ForEach(func(key, val []byte) error { + got[string(key)] = string(val) + return nil + }) + + expected := map[string]string{ + "key1": "val1", + "key2": "val2", + "key3": "val3", + "banana": "", + } + + assert.NoError(t, err) + assert.Equal(t, expected, got) + + got = make(map[string]string) + err = banana.ForEach(func(key, val []byte) error { + got[string(key)] = string(val) + return nil + }) + + assert.NoError(t, err) + // remove the sub-bucket key + delete(expected, "banana") + assert.Equal(t, expected, got) + + return nil + }) + + assert.Nil(t, err) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + bkey("apple", "banana"): bval("apple", "banana"), + vkey("key1", "apple"): "val1", + vkey("key2", "apple"): "val2", + vkey("key3", "apple"): "val3", + vkey("key1", "apple", "banana"): "val1", + vkey("key2", "apple", "banana"): "val2", + vkey("key3", "apple", "banana"): "val3", + } + assert.Equal(t, expected, f.Dump()) +} + +func TestBucketForEachWithError(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + // "apple" + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.Nil(t, err) + assert.NotNil(t, apple) + + // "apple/banana" + banana, err := apple.CreateBucket([]byte("banana")) + assert.Nil(t, err) + assert.NotNil(t, banana) + + // "apple/pear" + pear, err := apple.CreateBucket([]byte("pear")) + assert.Nil(t, err) + assert.NotNil(t, pear) + + kvs := []KV{{"key1", 
"val1"}, {"key2", "val2"}} + + // Put some values into "apple" and "apple/banana" too. + for _, kv := range kvs { + assert.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) + assert.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) + } + + got := make(map[string]string) + i := 0 + // Error while iterating value keys. + err = apple.ForEach(func(key, val []byte) error { + if i == 1 { + return fmt.Errorf("error") + } + + got[string(key)] = string(val) + i++ + return nil + }) + + expected := map[string]string{ + "key1": "val1", + } + + assert.Equal(t, expected, got) + assert.Error(t, err) + + got = make(map[string]string) + i = 0 + // Erro while iterating buckets. + err = apple.ForEach(func(key, val []byte) error { + if i == 3 { + return fmt.Errorf("error") + } + + got[string(key)] = string(val) + i++ + return nil + }) + + expected = map[string]string{ + "key1": "val1", + "key2": "val2", + "banana": "", + } + + assert.Equal(t, expected, got) + assert.Error(t, err) + return nil + }) + + assert.Nil(t, err) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + bkey("apple", "banana"): bval("apple", "banana"), + bkey("apple", "pear"): bval("apple", "pear"), + vkey("key1", "apple"): "val1", + vkey("key2", "apple"): "val2", + } + assert.Equal(t, expected, f.Dump()) +} + +func TestBucketSequence(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.Nil(t, err) + assert.NotNil(t, apple) + + banana, err := apple.CreateBucket([]byte("banana")) + assert.Nil(t, err) + assert.NotNil(t, banana) + + assert.Equal(t, uint64(0), apple.Sequence()) + assert.Equal(t, uint64(0), banana.Sequence()) + + assert.Nil(t, apple.SetSequence(math.MaxUint64)) + assert.Equal(t, uint64(math.MaxUint64), apple.Sequence()) + + for i := uint64(0); i < uint64(5); i++ { + s, err := apple.NextSequence() + assert.Nil(t, err) + assert.Equal(t, i, s) + } + + return nil + }) + + assert.Nil(t, err) +} diff --git a/channeldb/kvdb/etcd/readwrite_cursor.go b/channeldb/kvdb/etcd/readwrite_cursor.go new file mode 100644 index 0000000000..989656933a --- /dev/null +++ b/channeldb/kvdb/etcd/readwrite_cursor.go @@ -0,0 +1,145 @@ +// +build kvdb_etcd + +package etcd + +// readWriteCursor holds a reference to the cursors bucket, the value +// prefix and the current key used while iterating. +type readWriteCursor struct { + // bucket holds the reference to the parent bucket. + bucket *readWriteBucket + + // prefix holds the value prefix which is in front of each + // value key in the bucket. + prefix string + + // currKey holds the current key of the cursor. + currKey string +} + +func newReadWriteCursor(bucket *readWriteBucket) *readWriteCursor { + return &readWriteCursor{ + bucket: bucket, + prefix: string(makeValuePrefix(bucket.id)), + } +} + +// First positions the cursor at the first key/value pair and returns +// the pair. +func (c *readWriteCursor) First() (key, value []byte) { + // Get the first key with the value prefix. + kv, err := c.bucket.tx.stm.First(c.prefix) + if err != nil { + // TODO: revise this once kvdb interface supports errors + return nil, nil + } + + if kv != nil { + c.currKey = kv.key + // Chop the prefix and return the key/value. 
+ return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + } + + return nil, nil +} + +// Last positions the cursor at the last key/value pair and returns the +// pair. +func (c *readWriteCursor) Last() (key, value []byte) { + kv, err := c.bucket.tx.stm.Last(c.prefix) + if err != nil { + // TODO: revise this once kvdb interface supports errors + return nil, nil + } + + if kv != nil { + c.currKey = kv.key + // Chop the prefix and return the key/value. + return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + } + + return nil, nil +} + +// Next moves the cursor one key/value pair forward and returns the new +// pair. +func (c *readWriteCursor) Next() (key, value []byte) { + kv, err := c.bucket.tx.stm.Next(c.prefix, c.currKey) + if err != nil { + // TODO: revise this once kvdb interface supports errors + return nil, nil + } + + if kv != nil { + c.currKey = kv.key + // Chop the prefix and return the key/value. + return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + } + + return nil, nil +} + +// Prev moves the cursor one key/value pair backward and returns the new +// pair. +func (c *readWriteCursor) Prev() (key, value []byte) { + kv, err := c.bucket.tx.stm.Prev(c.prefix, c.currKey) + if err != nil { + // TODO: revise this once kvdb interface supports errors + return nil, nil + } + + if kv != nil { + c.currKey = kv.key + // Chop the prefix and return the key/value. + return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + } + + return nil, nil +} + +// Seek positions the cursor at the passed seek key. If the key does +// not exist, the cursor is moved to the next key after seek. Returns +// the new pair. +func (c *readWriteCursor) Seek(seek []byte) (key, value []byte) { + // Return nil if trying to seek to an empty key. + if seek == nil { + return nil, nil + } + + // Seek to the first key with prefix + seek. If that key is not present + // STM will seek to the next matching key with prefix. + kv, err := c.bucket.tx.stm.Seek(c.prefix, c.prefix+string(seek)) + if err != nil { + // TODO: revise this once kvdb interface supports errors + return nil, nil + } + + if kv != nil { + c.currKey = kv.key + // Chop the prefix and return the key/value. + return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + } + + return nil, nil +} + +// Delete removes the current key/value pair the cursor is at without +// invalidating the cursor. Returns ErrIncompatibleValue if attempted +// when the cursor points to a nested bucket. +func (c *readWriteCursor) Delete() error { + // Get the next key after the current one. We could do this + // after deletion too but it's one step more efficient here. + nextKey, err := c.bucket.tx.stm.Next(c.prefix, c.currKey) + if err != nil { + return err + } + + // Delete the current key. + c.bucket.tx.stm.Del(c.currKey) + + // Set current key to the next one if possible. 
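	// Note that because currKey is advanced to the deleted entry's
	// successor, a Next call made right after Delete would skip that
	// successor; callers that delete while iterating therefore restart
	// from First, as the cursor tests below do. A minimal sketch of that
	// idiom (assuming "apple" is an existing, populated bucket and db is
	// an open etcd backend):
	//
	//	err := db.Update(func(tx walletdb.ReadWriteTx) error {
	//		c := tx.ReadWriteBucket([]byte("apple")).ReadWriteCursor()
	//		for k, _ := c.First(); k != nil; k, _ = c.First() {
	//			if err := c.Delete(); err != nil {
	//				return err
	//			}
	//		}
	//		return nil
	//	})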
+ if nextKey != nil { + c.currKey = nextKey.key + } + + return nil +} diff --git a/channeldb/kvdb/etcd/readwrite_cursor_test.go b/channeldb/kvdb/etcd/readwrite_cursor_test.go new file mode 100644 index 0000000000..c14de7aa84 --- /dev/null +++ b/channeldb/kvdb/etcd/readwrite_cursor_test.go @@ -0,0 +1,293 @@ +// +build kvdb_etcd + +package etcd + +import ( + "testing" + + "github.com/btcsuite/btcwallet/walletdb" + "github.com/stretchr/testify/assert" +) + +func TestReadCursorEmptyInterval(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + b, err := tx.CreateTopLevelBucket([]byte("alma")) + assert.NoError(t, err) + assert.NotNil(t, b) + + return nil + }) + assert.NoError(t, err) + + err = db.View(func(tx walletdb.ReadTx) error { + b := tx.ReadBucket([]byte("alma")) + assert.NotNil(t, b) + + cursor := b.ReadCursor() + k, v := cursor.First() + assert.Nil(t, k) + assert.Nil(t, v) + + k, v = cursor.Next() + assert.Nil(t, k) + assert.Nil(t, v) + + k, v = cursor.Last() + assert.Nil(t, k) + assert.Nil(t, v) + + k, v = cursor.Prev() + assert.Nil(t, k) + assert.Nil(t, v) + + return nil + }) + assert.NoError(t, err) +} + +func TestReadCursorNonEmptyInterval(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + testKeyValues := []KV{ + {"b", "1"}, + {"c", "2"}, + {"da", "3"}, + {"e", "4"}, + } + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + b, err := tx.CreateTopLevelBucket([]byte("alma")) + assert.NoError(t, err) + assert.NotNil(t, b) + + for _, kv := range testKeyValues { + assert.NoError(t, b.Put([]byte(kv.key), []byte(kv.val))) + } + return nil + }) + + assert.NoError(t, err) + + err = db.View(func(tx walletdb.ReadTx) error { + b := tx.ReadBucket([]byte("alma")) + assert.NotNil(t, b) + + // Iterate from the front. + var kvs []KV + cursor := b.ReadCursor() + k, v := cursor.First() + + for k != nil && v != nil { + kvs = append(kvs, KV{string(k), string(v)}) + k, v = cursor.Next() + } + assert.Equal(t, testKeyValues, kvs) + + // Iterate from the back. + kvs = []KV{} + k, v = cursor.Last() + + for k != nil && v != nil { + kvs = append(kvs, KV{string(k), string(v)}) + k, v = cursor.Prev() + } + assert.Equal(t, reverseKVs(testKeyValues), kvs) + + // Random access + perm := []int{3, 0, 2, 1} + for _, i := range perm { + k, v := cursor.Seek([]byte(testKeyValues[i].key)) + assert.Equal(t, []byte(testKeyValues[i].key), k) + assert.Equal(t, []byte(testKeyValues[i].val), v) + } + + // Seek to nonexisting key. + k, v = cursor.Seek(nil) + assert.Nil(t, k) + assert.Nil(t, v) + + k, v = cursor.Seek([]byte("x")) + assert.Nil(t, k) + assert.Nil(t, v) + + return nil + }) + + assert.NoError(t, err) +} + +func TestReadWriteCursor(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + testKeyValues := []KV{ + {"b", "1"}, + {"c", "2"}, + {"da", "3"}, + {"e", "4"}, + } + + count := len(testKeyValues) + + // Pre-store the first half of the interval. 
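	// The first half is committed by a separate transaction, while the
	// second half is written inside the transaction below right before
	// the cursor is created. The cursor therefore has to merge entries
	// already persisted in etcd with entries that only exist in the
	// pending write set, which is the main behaviour exercised here.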
+ assert.NoError(t, db.Update(func(tx walletdb.ReadWriteTx) error { + b, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.NoError(t, err) + assert.NotNil(t, b) + + for i := 0; i < count/2; i++ { + err = b.Put( + []byte(testKeyValues[i].key), + []byte(testKeyValues[i].val), + ) + assert.NoError(t, err) + } + return nil + })) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + b := tx.ReadWriteBucket([]byte("apple")) + assert.NotNil(t, b) + + // Store the second half of the interval. + for i := count / 2; i < count; i++ { + err = b.Put( + []byte(testKeyValues[i].key), + []byte(testKeyValues[i].val), + ) + assert.NoError(t, err) + } + + cursor := b.ReadWriteCursor() + + // First on valid interval. + fk, fv := cursor.First() + assert.Equal(t, []byte("b"), fk) + assert.Equal(t, []byte("1"), fv) + + // Prev(First()) = nil + k, v := cursor.Prev() + assert.Nil(t, k) + assert.Nil(t, v) + + // Last on valid interval. + lk, lv := cursor.Last() + assert.Equal(t, []byte("e"), lk) + assert.Equal(t, []byte("4"), lv) + + // Next(Last()) = nil + k, v = cursor.Next() + assert.Nil(t, k) + assert.Nil(t, v) + + // Delete first item, then add an item before the + // deleted one. Check that First/Next will "jump" + // over the deleted item and return the new first. + _, _ = cursor.First() + assert.NoError(t, cursor.Delete()) + assert.NoError(t, b.Put([]byte("a"), []byte("0"))) + fk, fv = cursor.First() + + assert.Equal(t, []byte("a"), fk) + assert.Equal(t, []byte("0"), fv) + + k, v = cursor.Next() + assert.Equal(t, []byte("c"), k) + assert.Equal(t, []byte("2"), v) + + // Similarly test that a new end is returned if + // the old end is deleted first. + _, _ = cursor.Last() + assert.NoError(t, cursor.Delete()) + assert.NoError(t, b.Put([]byte("f"), []byte("5"))) + + lk, lv = cursor.Last() + assert.Equal(t, []byte("f"), lk) + assert.Equal(t, []byte("5"), lv) + + k, v = cursor.Prev() + assert.Equal(t, []byte("da"), k) + assert.Equal(t, []byte("3"), v) + + // Overwrite k/v in the middle of the interval. + assert.NoError(t, b.Put([]byte("c"), []byte("3"))) + k, v = cursor.Prev() + assert.Equal(t, []byte("c"), k) + assert.Equal(t, []byte("3"), v) + + // Insert new key/values. + assert.NoError(t, b.Put([]byte("cx"), []byte("x"))) + assert.NoError(t, b.Put([]byte("cy"), []byte("y"))) + + k, v = cursor.Next() + assert.Equal(t, []byte("cx"), k) + assert.Equal(t, []byte("x"), v) + + k, v = cursor.Next() + assert.Equal(t, []byte("cy"), k) + assert.Equal(t, []byte("y"), v) + + expected := []KV{ + {"a", "0"}, + {"c", "3"}, + {"cx", "x"}, + {"cy", "y"}, + {"da", "3"}, + {"f", "5"}, + } + + // Iterate from the front. + var kvs []KV + k, v = cursor.First() + + for k != nil && v != nil { + kvs = append(kvs, KV{string(k), string(v)}) + k, v = cursor.Next() + } + assert.Equal(t, expected, kvs) + + // Iterate from the back. 
+ kvs = []KV{} + k, v = cursor.Last() + + for k != nil && v != nil { + kvs = append(kvs, KV{string(k), string(v)}) + k, v = cursor.Prev() + } + assert.Equal(t, reverseKVs(expected), kvs) + + return nil + }) + + assert.NoError(t, err) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + vkey("a", "apple"): "0", + vkey("c", "apple"): "3", + vkey("cx", "apple"): "x", + vkey("cy", "apple"): "y", + vkey("da", "apple"): "3", + vkey("f", "apple"): "5", + } + assert.Equal(t, expected, f.Dump()) +} diff --git a/channeldb/kvdb/etcd/readwrite_tx.go b/channeldb/kvdb/etcd/readwrite_tx.go new file mode 100644 index 0000000000..22d0ce421e --- /dev/null +++ b/channeldb/kvdb/etcd/readwrite_tx.go @@ -0,0 +1,152 @@ +// +build kvdb_etcd + +package etcd + +import ( + "github.com/btcsuite/btcwallet/walletdb" +) + +// readWriteTx holds a reference to the STM transaction. +type readWriteTx struct { + // stm is the reference to the parent STM. + stm STM + + // rootBucketID holds the sha256 hash of the root bucket id, which is used + // for key space spearation. + rootBucketID [bucketIDLength]byte + + // active is true if the transaction hasn't been committed yet. + active bool + + // dirty is true if we intent to update a value in this transaction. + dirty bool + + // lset holds key/value set that we want to lock on. If upon commit the + // transaction is dirty and the lset is not empty, we'll bump the mod + // version of these key/values. + lset map[string]string +} + +// newReadWriteTx creates an rw transaction with the passed STM. +func newReadWriteTx(stm STM, prefix string) *readWriteTx { + return &readWriteTx{ + stm: stm, + active: true, + rootBucketID: makeBucketID([]byte(prefix)), + lset: make(map[string]string), + } +} + +// rooBucket is a helper function to return the always present +// pseudo root bucket. +func rootBucket(tx *readWriteTx) *readWriteBucket { + return newReadWriteBucket(tx, tx.rootBucketID[:], tx.rootBucketID[:]) +} + +// lock adds a key value to the lock set. +func (tx *readWriteTx) lock(key, val string) { + tx.stm.Lock(key) + if !tx.dirty { + tx.lset[key] = val + } else { + // Bump the mod version of the key, + // leaving the value intact. + tx.stm.Put(key, val) + } +} + +// put updates the passed key/value. +func (tx *readWriteTx) put(key, val string) { + tx.stm.Put(key, val) + tx.setDirty() +} + +// del marks the passed key deleted. +func (tx *readWriteTx) del(key string) { + tx.stm.Del(key) + tx.setDirty() +} + +// setDirty marks the transaction dirty and bumps +// mod version for the existing lock set if it is +// not empty. +func (tx *readWriteTx) setDirty() { + // Bump the lock set. + if !tx.dirty && len(tx.lset) > 0 { + for key, val := range tx.lset { + // Bump the mod version of the key, + // leaving the value intact. + tx.stm.Put(key, val) + } + + // Clear the lock set. + tx.lset = make(map[string]string) + } + + // Set dirty. + tx.dirty = true +} + +// ReadBucket opens the root bucket for read only access. If the bucket +// described by the key does not exist, nil is returned. +func (tx *readWriteTx) ReadBucket(key []byte) walletdb.ReadBucket { + return rootBucket(tx).NestedReadWriteBucket(key) +} + +// Rollback closes the transaction, discarding changes (if any) if the +// database was modified by a write transaction. +func (tx *readWriteTx) Rollback() error { + // If the transaction has been closed roolback will fail. + if !tx.active { + return walletdb.ErrTxClosed + } + + // Rollback the STM and set the tx to inactive. 
+ tx.stm.Rollback() + tx.active = false + + return nil +} + +// ReadWriteBucket opens the root bucket for read/write access. If the +// bucket described by the key does not exist, nil is returned. +func (tx *readWriteTx) ReadWriteBucket(key []byte) walletdb.ReadWriteBucket { + return rootBucket(tx).NestedReadWriteBucket(key) +} + +// CreateTopLevelBucket creates the top level bucket for a key if it +// does not exist. The newly-created bucket it returned. +func (tx *readWriteTx) CreateTopLevelBucket(key []byte) (walletdb.ReadWriteBucket, error) { + return rootBucket(tx).CreateBucketIfNotExists(key) +} + +// DeleteTopLevelBucket deletes the top level bucket for a key. This +// errors if the bucket can not be found or the key keys a single value +// instead of a bucket. +func (tx *readWriteTx) DeleteTopLevelBucket(key []byte) error { + return rootBucket(tx).DeleteNestedBucket(key) +} + +// Commit commits the transaction if not already committed. Will return +// error if the underlying STM fails. +func (tx *readWriteTx) Commit() error { + // Commit will fail if the transaction is already committed. + if !tx.active { + return walletdb.ErrTxClosed + } + + // Try committing the transaction. + if err := tx.stm.Commit(); err != nil { + return err + } + + // Mark the transaction as not active after commit. + tx.active = false + + return nil +} + +// OnCommit sets the commit callback (overriding if already set). +func (tx *readWriteTx) OnCommit(cb func()) { + tx.stm.OnCommit(cb) +} diff --git a/channeldb/kvdb/etcd/readwrite_tx_test.go b/channeldb/kvdb/etcd/readwrite_tx_test.go new file mode 100644 index 0000000000..f65faa5456 --- /dev/null +++ b/channeldb/kvdb/etcd/readwrite_tx_test.go @@ -0,0 +1,156 @@ +// +build kvdb_etcd + +package etcd + +import ( + "testing" + + "github.com/btcsuite/btcwallet/walletdb" + "github.com/stretchr/testify/assert" +) + +func TestTxManualCommit(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + tx, err := db.BeginReadWriteTx() + assert.NoError(t, err) + assert.NotNil(t, tx) + + committed := false + + tx.OnCommit(func() { + committed = true + }) + + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.NoError(t, err) + assert.NotNil(t, apple) + assert.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) + + banana, err := tx.CreateTopLevelBucket([]byte("banana")) + assert.NoError(t, err) + assert.NotNil(t, banana) + assert.NoError(t, banana.Put([]byte("testKey"), []byte("testVal"))) + assert.NoError(t, tx.DeleteTopLevelBucket([]byte("banana"))) + + assert.NoError(t, tx.Commit()) + assert.True(t, committed) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + vkey("testKey", "apple"): "testVal", + } + assert.Equal(t, expected, f.Dump()) +} + +func TestTxRollback(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + tx, err := db.BeginReadWriteTx() + assert.Nil(t, err) + assert.NotNil(t, tx) + + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.Nil(t, err) + assert.NotNil(t, apple) + + assert.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) + + assert.NoError(t, tx.Rollback()) + assert.Error(t, walletdb.ErrTxClosed, tx.Commit()) + assert.Equal(t, map[string]string{}, f.Dump()) +} + +func TestChangeDuringManualTx(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, 
err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + tx, err := db.BeginReadWriteTx() + assert.Nil(t, err) + assert.NotNil(t, tx) + + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.Nil(t, err) + assert.NotNil(t, apple) + + assert.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) + + // Try overwriting the bucket key. + f.Put(bkey("apple"), "banana") + + // TODO: translate error + assert.NotNil(t, tx.Commit()) + assert.Equal(t, map[string]string{ + bkey("apple"): "banana", + }, f.Dump()) +} + +func TestChangeDuringUpdate(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + count := 0 + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + assert.NoError(t, err) + assert.NotNil(t, apple) + + assert.NoError(t, apple.Put([]byte("key"), []byte("value"))) + + if count == 0 { + f.Put(vkey("key", "apple"), "new_value") + f.Put(vkey("key2", "apple"), "value2") + } + + cursor := apple.ReadCursor() + k, v := cursor.First() + assert.Equal(t, []byte("key"), k) + assert.Equal(t, []byte("value"), v) + assert.Equal(t, v, apple.Get([]byte("key"))) + + k, v = cursor.Next() + if count == 0 { + assert.Nil(t, k) + assert.Nil(t, v) + } else { + assert.Equal(t, []byte("key2"), k) + assert.Equal(t, []byte("value2"), v) + } + + count++ + return nil + }) + + assert.Nil(t, err) + assert.Equal(t, count, 2) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + vkey("key", "apple"): "value", + vkey("key2", "apple"): "value2", + } + assert.Equal(t, expected, f.Dump()) +} diff --git a/channeldb/kvdb/etcd/stm.go b/channeldb/kvdb/etcd/stm.go new file mode 100644 index 0000000000..a3f8c22335 --- /dev/null +++ b/channeldb/kvdb/etcd/stm.go @@ -0,0 +1,802 @@ +// +build kvdb_etcd + +package etcd + +import ( + "context" + "fmt" + "math" + "strings" + + v3 "github.com/coreos/etcd/clientv3" +) + +type CommitStats struct { + Rset int + Wset int + Retries int +} + +// KV stores a key/value pair. +type KV struct { + key string + val string +} + +// STM is an interface for software transactional memory. +// All calls that return error will do so only if STM is manually handled and +// abort the apply closure otherwise. In both case the returned error is a +// DatabaseError. +type STM interface { + // Get returns the value for a key and inserts the key in the txn's read + // set. Returns nil if there's no matching key, or the key is empty. + Get(key string) ([]byte, error) + + // Lock adds a key to the lock set. If the lock set is not empty, we'll + // only check for conflicts in the lock set and the write set, instead + // of all read keys plus the write set. + Lock(key string) + + // Put adds a value for a key to the txn's write set. + Put(key, val string) + + // Del adds a delete operation for the key to the txn's write set. + Del(key string) + + // First returns the first k/v that begins with prefix or nil if there's + // no such k/v pair. If the key is found it is inserted to the txn's + // read set. Returns nil if there's no match. + First(prefix string) (*KV, error) + + // Last returns the last k/v that begins with prefix or nil if there's + // no such k/v pair. If the key is found it is inserted to the txn's + // read set. Returns nil if there's no match. + Last(prefix string) (*KV, error) + + // Prev returns the previous k/v before key that begins with prefix or + // nil if there's no such k/v. 
If the key is found it is inserted to the + // read set. Returns nil if there's no match. + Prev(prefix, key string) (*KV, error) + + // Next returns the next k/v after key that begins with prefix or nil + // if there's no such k/v. If the key is found it is inserted to the + // txn's read set. Returns nil if there's no match. + Next(prefix, key string) (*KV, error) + + // Seek will return k/v at key beginning with prefix. If the key doesn't + // exists Seek will return the next k/v after key beginning with prefix. + // If a matching k/v is found it is inserted to the txn's read set. Returns + // nil if there's no match. + Seek(prefix, key string) (*KV, error) + + // OnCommit calls the passed callback func upon commit. + OnCommit(func()) + + // Commit attempts to apply the txn's changes to the server. + // Commit may return CommitError if transaction is outdated and needs retry. + Commit() error + + // Rollback emties the read and write sets such that a subsequent commit + // won't alter the database. + Rollback() +} + +// CommitError is used to check if there was an error +// due to stale data in the transaction. +type CommitError struct{} + +// Error returns a static string for CommitError for +// debugging/logging purposes. +func (e CommitError) Error() string { + return "commit failed" +} + +// DatabaseError is used to wrap errors that are not +// related to stale data in the transaction. +type DatabaseError struct { + msg string + err error +} + +// Unwrap returns the wrapped error in a DatabaseError. +func (e *DatabaseError) Unwrap() error { + return e.err +} + +// Error simply converts DatabaseError to a string that +// includes both the message and the wrapped error. +func (e DatabaseError) Error() string { + return fmt.Sprintf("etcd error: %v - %v", e.msg, e.err) +} + +// stmGet is the result of a read operation, +// a value and the mod revision of the key/value. +type stmGet struct { + val string + rev int64 +} + +// readSet stores all reads done in an STM. +type readSet map[string]stmGet + +// stmPut stores a value and an operation (put/delete). +type stmPut struct { + val string + op v3.Op +} + +// writeSet stroes all writes done in an STM. +type writeSet map[string]stmPut + +// stm implements repeatable-read software transactional memory +// over etcd. +type stm struct { + // client is an etcd client handling all RPC communications + // to the etcd instance/cluster. + client *v3.Client + + // manual is set to true for manual transactions which don't + // execute in the STM run loop. + manual bool + + // options stores optional settings passed by the user. + options *STMOptions + + // prefetch hold prefetched key values and revisions. + prefetch readSet + + // rset holds read key values and revisions. + rset readSet + + // wset holds overwritten keys and their values. + wset writeSet + + // lset holds keys we intent to lock on. + lset map[string]interface{} + + // getOpts are the opts used for gets. + getOpts []v3.OpOption + + // revision stores the snapshot revision after first read. + revision int64 + + // onCommit gets called upon commit. + onCommit func() +} + +// STMOptions can be used to pass optional settings +// when an STM is created. +type STMOptions struct { + // ctx holds an externally provided abort context. + ctx context.Context + commitStatsCallback func(bool, CommitStats) +} + +// STMOptionFunc is a function that updates the passed STMOptions. 
+type STMOptionFunc func(*STMOptions) + +// WithAbortContext specifies the context for permanently +// aborting the transaction. +func WithAbortContext(ctx context.Context) STMOptionFunc { + return func(so *STMOptions) { + so.ctx = ctx + } +} + +func WithCommitStatsCallback(cb func(bool, CommitStats)) STMOptionFunc { + return func(so *STMOptions) { + so.commitStatsCallback = cb + } +} + +// RunSTM runs the apply function by creating an STM using serializable snapshot +// isolation, passing it to the apply and handling commit errors and retries. +func RunSTM(cli *v3.Client, apply func(STM) error, so ...STMOptionFunc) error { + return runSTM(makeSTM(cli, false, so...), apply) +} + +// NewSTM creates a new STM instance, using serializable snapshot isolation. +func NewSTM(cli *v3.Client, so ...STMOptionFunc) STM { + return makeSTM(cli, true, so...) +} + +// makeSTM is the actual constructor of the stm. It first apply all passed +// options then creates the stm object and resets it before returning. +func makeSTM(cli *v3.Client, manual bool, so ...STMOptionFunc) *stm { + opts := &STMOptions{ + ctx: cli.Ctx(), + } + + // Apply all functional options. + for _, fo := range so { + fo(opts) + } + + s := &stm{ + client: cli, + manual: manual, + options: opts, + prefetch: make(map[string]stmGet), + } + + // Reset read and write set. + s.Rollback() + + return s +} + +// runSTM implements the run loop of the STM, running the apply func, catching +// errors and handling commit. The loop will quit on every error except +// CommitError which is used to indicate a necessary retry. +func runSTM(s *stm, apply func(STM) error) error { + out := make(chan error, 1) + + go func() { + var ( + retries int + stats CommitStats + ) + + defer func() { + // Recover DatabaseError panics so + // we can return them. + if r := recover(); r != nil { + e, ok := r.(DatabaseError) + if !ok { + // Unknown panic. + panic(r) + } + + // Return the error. + out <- e.Unwrap() + } + }() + + var err error + + // In a loop try to apply and commit and roll back + // if the database has changed (CommitError). + for { + // Abort STM if there was an application error. + if err = apply(s); err != nil { + break + } + + stats, err = s.commit() + + // Re-apply only upon commit error + // (meaning the database was changed). + if _, ok := err.(CommitError); !ok { + // Anything that's not a CommitError + // aborts the STM run loop. + break + } + + // Rollback before trying to re-apply. + s.Rollback() + retries++ + } + + if s.options.commitStatsCallback != nil { + stats.Retries = retries + s.options.commitStatsCallback(err == nil, stats) + } + + // Return the error to the caller. + out <- err + }() + + return <-out +} + +// add inserts a txn response to the read set. This is useful when the txn +// fails due to conflict where the txn response can be used to prefetch +// key/values. +func (rs readSet) add(txnResp *v3.TxnResponse) { + for _, resp := range txnResp.Responses { + getResp := (*v3.GetResponse)(resp.GetResponseRange()) + for _, kv := range getResp.Kvs { + rs[string(kv.Key)] = stmGet{ + val: string(kv.Value), + rev: kv.ModRevision, + } + } + } +} + +// gets is a helper to create an op slice for transaction +// construction. +func (rs readSet) gets() []v3.Op { + ops := make([]v3.Op, 0, len(rs)) + + for k := range rs { + ops = append(ops, v3.OpGet(k)) + } + + return ops +} + +// cmps returns a cmp list testing values in read set didn't change. 
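// The run loop above is the intended entry point for most callers: RunSTM
// re-runs the apply closure whenever commit fails with CommitError, so the
// closure must be safe to execute more than once. A minimal sketch of that
// usage (the helper name, key and callback body are illustrative only):
func runSTMSketch(cli *v3.Client, ctx context.Context) error {
	return RunSTM(
		cli,
		func(stm STM) error {
			// Read-modify-write entirely through the STM so that
			// concurrent writers are detected at commit time.
			val, err := stm.Get("counter/key")
			if err != nil {
				return err
			}

			stm.Put("counter/key", string(val)+"!")
			return nil
		},
		WithAbortContext(ctx),
		WithCommitStatsCallback(func(succeeded bool, stats CommitStats) {
			// The stats carry the read/write set sizes and, after
			// the run loop finishes, the retry count.
			_ = succeeded
			_ = stats
		}),
	)
}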
+func (rs readSet) cmps(lset map[string]interface{}) []v3.Cmp { + if len(lset) > 0 { + cmps := make([]v3.Cmp, 0, len(lset)) + for key, _ := range lset { + if getValue, ok := rs[key]; ok { + cmps = append( + cmps, + v3.Compare(v3.ModRevision(key), "=", getValue.rev), + ) + } + } + return cmps + } + + cmps := make([]v3.Cmp, 0, len(rs)) + for key, getValue := range rs { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "=", getValue.rev)) + } + + return cmps +} + +// cmps returns a cmp list testing no writes have happened past rev. +func (ws writeSet) cmps(rev int64) []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(ws)) + for key := range ws { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) + } + + return cmps +} + +// puts is the list of ops for all pending writes. +func (ws writeSet) puts() []v3.Op { + puts := make([]v3.Op, 0, len(ws)) + for _, v := range ws { + puts = append(puts, v.op) + } + + return puts +} + +// fetch is a helper to fetch key/value given options. If a value is returned +// then fetch will try to fix the STM's snapshot revision (if not already set). +// We'll also cache the returned key/value in the read set. +func (s *stm) fetch(key string, opts ...v3.OpOption) ([]KV, error) { + resp, err := s.client.Get( + s.options.ctx, key, append(opts, s.getOpts...)..., + ) + if err != nil { + dbErr := DatabaseError{ + msg: "stm.fetch() failed", + err: err, + } + + // Do not panic when executing a manual transaction. + if s.manual { + return nil, dbErr + } + + // Panic when executing inside the STM runloop. + panic(dbErr) + } + + // Set revison and serializable options upon first fetch + // for any subsequent fetches. + if s.getOpts == nil { + s.revision = resp.Header.Revision + s.getOpts = []v3.OpOption{ + v3.WithRev(s.revision), + v3.WithSerializable(), + } + } + + var result []KV + + // Fill the read set with key/values returned. + for _, kv := range resp.Kvs { + // Remove from prefetch. + key := string(kv.Key) + val := string(kv.Value) + + delete(s.prefetch, key) + + // Add to read set. + s.rset[key] = stmGet{ + val: val, + rev: kv.ModRevision, + } + + result = append(result, KV{key, val}) + } + + return result, nil +} + +// Get returns the value for key. If there's no such +// key/value in the database or the passed key is empty +// Get will return nil. +func (s *stm) Get(key string) ([]byte, error) { + if key == "" { + return nil, nil + } + + // Return freshly written value if present. + if put, ok := s.wset[key]; ok { + if put.op.IsDelete() { + return nil, nil + } + + return []byte(put.val), nil + } + + // Populate read set if key is present in + // the prefetch set. + if getValue, ok := s.prefetch[key]; ok { + delete(s.prefetch, key) + s.rset[key] = getValue + } + + // Return value if alread in read set. + if getVal, ok := s.rset[key]; ok { + return []byte(getVal.val), nil + } + + // Fetch and return value. + kvs, err := s.fetch(key) + if err != nil { + return nil, err + } + + if len(kvs) > 0 { + return []byte(kvs[0].val), nil + } + + // Return empty result if key not in DB. + return nil, nil +} + +// Lock adds a key to the lock set. If the lock set is +// not empty, we'll only check conflicts for the keys +// in the lock set. +func (s *stm) Lock(key string) { + s.lset[key] = nil +} + +// First returns the first key/value matching prefix. If there's no key starting +// with prefix, Last will return nil. +func (s *stm) First(prefix string) (*KV, error) { + return s.next(prefix, prefix, true) +} + +// Last returns the last key/value with prefix. 
If there's no key starting with +// prefix, Last will return nil. +func (s *stm) Last(prefix string) (*KV, error) { + // As we don't know the full range, fetch the last + // key/value with this prefix first. + resp, err := s.fetch(prefix, v3.WithLastKey()...) + if err != nil { + return nil, err + } + + var ( + kv KV + found bool + ) + + if len(resp) > 0 { + kv = resp[0] + found = true + } + + // Now make sure there's nothing in the write set + // that is a better match, meaning it has the same + // prefix but is greater or equal than the current + // best candidate. Note that this is not efficient + // when the write set is large! + for k, put := range s.wset { + if put.op.IsDelete() { + continue + } + + if strings.HasPrefix(k, prefix) && k >= kv.key { + kv.key = k + kv.val = put.val + found = true + } + } + + if found { + return &kv, nil + } + + return nil, nil +} + +// Prev returns the prior key/value before key (with prefix). If there's no such +// key Next will return nil. +func (s *stm) Prev(prefix, startKey string) (*KV, error) { + var result KV + + fetchKey := startKey + matchFound := false + + for { + // Ask etcd to retrieve one key that is a + // match in descending order from the passed key. + opts := []v3.OpOption{ + v3.WithRange(fetchKey), + v3.WithSort(v3.SortByKey, v3.SortDescend), + v3.WithLimit(1), + } + + kvs, err := s.fetch(prefix, opts...) + if err != nil { + return nil, err + } + + if len(kvs) == 0 { + break + } + + kv := &kvs[0] + + // WithRange and WithPrefix can't be used + // together, so check prefix here. If the + // returned key no longer has the prefix, + // then break out. + if !strings.HasPrefix(kv.key, prefix) { + break + } + + // Fetch the prior key if this is deleted. + if put, ok := s.wset[kv.key]; ok && put.op.IsDelete() { + fetchKey = kv.key + continue + } + + result = *kv + matchFound = true + + break + } + + // Closre holding all checks to find a possibly + // better match. + matches := func(key string) bool { + if !strings.HasPrefix(key, prefix) { + return false + } + + if !matchFound { + return key < startKey + } + + // matchFound == true + return result.key <= key && key < startKey + } + + // Now go trough the write set and check + // if there's an even better match. + for k, put := range s.wset { + if !put.op.IsDelete() && matches(k) { + result.key = k + result.val = put.val + matchFound = true + } + } + + if !matchFound { + return nil, nil + } + + return &result, nil +} + +// Next returns the next key/value after key (with prefix). If there's no such +// key Next will return nil. +func (s *stm) Next(prefix string, key string) (*KV, error) { + return s.next(prefix, key, false) +} + +// Seek "seeks" to the key (with prefix). If the key doesn't exists it'll get +// the next key with the same prefix. If no key fills this criteria, Seek will +// return nil. +func (s *stm) Seek(prefix, key string) (*KV, error) { + return s.next(prefix, key, true) +} + +// next will try to retrieve the next match that has prefix and starts with the +// passed startKey. If includeStartKey is set to true, it'll return the value +// of startKey (essentially implementing seek). +func (s *stm) next(prefix, startKey string, includeStartKey bool) (*KV, error) { + var result KV + + fetchKey := startKey + firstFetch := true + matchFound := false + + for { + // Ask etcd to retrieve one key that is a + // match in ascending order from the passed key. 
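		// When the start key itself must be excluded, the code below
		// appends a zero byte to fetchKey: in lexicographic key
		// ordering key+"\x00" is the immediate successor of key, so a
		// WithFromKey query starting there returns the first key that
		// is strictly greater than fetchKey.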
+ opts := []v3.OpOption{ + v3.WithFromKey(), + v3.WithSort(v3.SortByKey, v3.SortAscend), + v3.WithLimit(1), + } + + // By default we include the start key too + // if it is a full match. + if includeStartKey && firstFetch { + firstFetch = false + } else { + // If we'd like to retrieve the first key + // after the start key. + fetchKey += "\x00" + } + + kvs, err := s.fetch(fetchKey, opts...) + if err != nil { + return nil, err + } + + if len(kvs) == 0 { + break + } + + kv := &kvs[0] + // WithRange and WithPrefix can't be used + // together, so check prefix here. If the + // returned key no longer has the prefix, + // then break the fetch loop. + if !strings.HasPrefix(kv.key, prefix) { + break + } + + // Move on to fetch starting with the next + // key if this one is marked deleted. + if put, ok := s.wset[kv.key]; ok && put.op.IsDelete() { + fetchKey = kv.key + continue + } + + result = *kv + matchFound = true + + break + } + + // Closure holding all checks to find a possibly + // better match. + matches := func(k string) bool { + if !strings.HasPrefix(k, prefix) { + return false + } + + if includeStartKey && !matchFound { + return startKey <= k + } + + if !includeStartKey && !matchFound { + return startKey < k + } + + if includeStartKey && matchFound { + return startKey <= k && k <= result.key + } + + // !includeStartKey && matchFound. + return startKey < k && k <= result.key + } + + // Now go trough the write set and check + // if there's an even better match. + for k, put := range s.wset { + if !put.op.IsDelete() && matches(k) { + result.key = k + result.val = put.val + matchFound = true + } + } + + if !matchFound { + return nil, nil + } + + return &result, nil +} + +// Put sets the value of the passed key. The actual put will happen upon commit. +func (s *stm) Put(key, val string) { + s.wset[key] = stmPut{ + val: val, + op: v3.OpPut(key, val), + } +} + +// Del marks a key as deleted. The actual delete will happen upon commit. +func (s *stm) Del(key string) { + s.wset[key] = stmPut{ + val: "", + op: v3.OpDelete(key), + } +} + +// OnCommit sets the callback that is called upon committing the STM +// transaction. +func (s *stm) OnCommit(cb func()) { + s.onCommit = cb +} + +// commit builds the final transaction and tries to execute it. If commit fails +// because the keys have changed return a CommitError, otherwise return a +// DatabaseError. +func (s *stm) commit() (CommitStats, error) { + rset := s.rset.cmps(s.lset) + wset := s.wset.cmps(s.revision + 1) + + stats := CommitStats{ + Rset: len(rset), + Wset: len(wset), + } + + // Create the compare set. + cmps := append(rset, wset...) + // Create a transaction with the optional abort context. + txn := s.client.Txn(s.options.ctx) + + // If the compare set holds, try executing the puts. + txn = txn.If(cmps...) + txn = txn.Then(s.wset.puts()...) + + // Prefetch keys in case of conflict to save + // a round trip to etcd. + txn = txn.Else(s.rset.gets()...) + + txnresp, err := txn.Commit() + if err != nil { + return stats, DatabaseError{ + msg: "stm.Commit() failed", + err: err, + } + } + + // Call the commit callback if the transaction + // was successful. + if txnresp.Succeeded { + if s.onCommit != nil { + s.onCommit() + } + + return stats, nil + } + + // Load prefetch before if commit failed. + s.rset.add(txnresp) + s.prefetch = s.rset + + // Return CommitError indicating that the transaction + // can be retried. + return stats, CommitError{} +} + +// Commit simply calls commit and the commit stats callback if set. 
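// A hedged sketch of the manual flow enabled by NewSTM: outside the run loop
// the caller owns conflict handling, so a CommitError from Commit simply means
// the snapshot went stale and the work has to be redone on a fresh STM. The
// helper below is illustrative and not part of the actual API:
func manualCommitSketch(cli *v3.Client) error {
	for {
		stm := NewSTM(cli)
		stm.Put("some/key", "some value")

		err := stm.Commit()
		if _, stale := err.(CommitError); stale {
			// The database changed underneath us; retry with a
			// fresh read snapshot.
			continue
		}

		return err
	}
}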
+func (s *stm) Commit() error { + stats, err := s.commit() + + if s.options.commitStatsCallback != nil { + s.options.commitStatsCallback(err == nil, stats) + } + + return err +} + +// Rollback resets the STM. This is useful for uncommitted transaction rollback +// and also used in the STM main loop to reset state if commit fails. +func (s *stm) Rollback() { + s.rset = make(map[string]stmGet) + s.wset = make(map[string]stmPut) + s.lset = make(map[string]interface{}) + s.getOpts = nil + s.revision = math.MaxInt64 - 1 +} diff --git a/channeldb/kvdb/etcd/stm_test.go b/channeldb/kvdb/etcd/stm_test.go new file mode 100644 index 0000000000..767963d4f0 --- /dev/null +++ b/channeldb/kvdb/etcd/stm_test.go @@ -0,0 +1,344 @@ +// +build kvdb_etcd + +package etcd + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func reverseKVs(a []KV) []KV { + for i, j := 0, len(a)-1; i < j; i, j = i+1, j-1 { + a[i], a[j] = a[j], a[i] + } + + return a +} + +func TestPutToEmpty(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + apply := func(stm STM) error { + stm.Put("123", "abc") + return nil + } + + err = RunSTM(db.cli, apply) + assert.NoError(t, err) + + assert.Equal(t, "abc", f.Get("123")) +} + +func TestGetPutDel(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.cleanup() + + testKeyValues := []KV{ + {"a", "1"}, + {"b", "2"}, + {"c", "3"}, + {"d", "4"}, + {"e", "5"}, + } + + for _, kv := range testKeyValues { + f.Put(kv.key, kv.val) + } + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + apply := func(stm STM) error { + // Get some non existing keys. + v, err := stm.Get("") + assert.NoError(t, err) + assert.Nil(t, v) + + v, err = stm.Get("x") + assert.NoError(t, err) + assert.Nil(t, v) + + // Get all existing keys. + for _, kv := range testKeyValues { + v, err = stm.Get(kv.key) + assert.NoError(t, err) + assert.Equal(t, []byte(kv.val), v) + } + + // Overwrite, then delete an existing key. + stm.Put("c", "6") + + v, err = stm.Get("c") + assert.NoError(t, err) + assert.Equal(t, []byte("6"), v) + + stm.Del("c") + + v, err = stm.Get("c") + assert.NoError(t, err) + assert.Nil(t, v) + + // Re-add the deleted key. + stm.Put("c", "7") + + v, err = stm.Get("c") + assert.NoError(t, err) + assert.Equal(t, []byte("7"), v) + + // Add a new key. + stm.Put("x", "x") + + v, err = stm.Get("x") + assert.NoError(t, err) + assert.Equal(t, []byte("x"), v) + + return nil + } + + err = RunSTM(db.cli, apply) + assert.NoError(t, err) + + assert.Equal(t, "1", f.Get("a")) + assert.Equal(t, "2", f.Get("b")) + assert.Equal(t, "7", f.Get("c")) + assert.Equal(t, "4", f.Get("d")) + assert.Equal(t, "5", f.Get("e")) + assert.Equal(t, "x", f.Get("x")) +} + +func TestFirstLastNextPrev(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + testKeyValues := []KV{ + {"kb", "1"}, + {"kc", "2"}, + {"kda", "3"}, + {"ke", "4"}, + {"w", "w"}, + } + for _, kv := range testKeyValues { + f.Put(kv.key, kv.val) + } + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + apply := func(stm STM) error { + // First/Last on valid multi item interval. + kv, err := stm.First("k") + assert.NoError(t, err) + assert.Equal(t, &KV{"kb", "1"}, kv) + + kv, err = stm.Last("k") + assert.NoError(t, err) + assert.Equal(t, &KV{"ke", "4"}, kv) + + // First/Last on single item interval. 
+ kv, err = stm.First("w") + assert.NoError(t, err) + assert.Equal(t, &KV{"w", "w"}, kv) + + kv, err = stm.Last("w") + assert.NoError(t, err) + assert.Equal(t, &KV{"w", "w"}, kv) + + // Next/Prev on start/end. + kv, err = stm.Next("k", "ke") + assert.NoError(t, err) + assert.Nil(t, kv) + + kv, err = stm.Prev("k", "kb") + assert.NoError(t, err) + assert.Nil(t, kv) + + // Next/Prev in the middle. + kv, err = stm.Next("k", "kc") + assert.NoError(t, err) + assert.Equal(t, &KV{"kda", "3"}, kv) + + kv, err = stm.Prev("k", "ke") + assert.NoError(t, err) + assert.Equal(t, &KV{"kda", "3"}, kv) + + // Delete first item, then add an item before the + // deleted one. Check that First/Next will "jump" + // over the deleted item and return the new first. + stm.Del("kb") + stm.Put("ka", "0") + + kv, err = stm.First("k") + assert.NoError(t, err) + assert.Equal(t, &KV{"ka", "0"}, kv) + + kv, err = stm.Prev("k", "kc") + assert.NoError(t, err) + assert.Equal(t, &KV{"ka", "0"}, kv) + + // Similarly test that a new end is returned if + // the old end is deleted first. + stm.Del("ke") + stm.Put("kf", "5") + + kv, err = stm.Last("k") + assert.NoError(t, err) + assert.Equal(t, &KV{"kf", "5"}, kv) + + kv, err = stm.Next("k", "kda") + assert.NoError(t, err) + assert.Equal(t, &KV{"kf", "5"}, kv) + + // Overwrite one in the middle. + stm.Put("kda", "6") + + kv, err = stm.Next("k", "kc") + assert.NoError(t, err) + assert.Equal(t, &KV{"kda", "6"}, kv) + + // Add three in the middle, then delete one. + stm.Put("kdb", "7") + stm.Put("kdc", "8") + stm.Put("kdd", "9") + stm.Del("kdc") + + // Check that stepping from first to last returns + // the expected sequence. + var kvs []KV + + curr, err := stm.First("k") + assert.NoError(t, err) + + for curr != nil { + kvs = append(kvs, *curr) + curr, err = stm.Next("k", curr.key) + assert.NoError(t, err) + } + + expected := []KV{ + {"ka", "0"}, + {"kc", "2"}, + {"kda", "6"}, + {"kdb", "7"}, + {"kdd", "9"}, + {"kf", "5"}, + } + assert.Equal(t, expected, kvs) + + // Similarly check that stepping from last to first + // returns the expected sequence. + kvs = []KV{} + + curr, err = stm.Last("k") + assert.NoError(t, err) + + for curr != nil { + kvs = append(kvs, *curr) + curr, err = stm.Prev("k", curr.key) + assert.NoError(t, err) + } + + expected = reverseKVs(expected) + assert.Equal(t, expected, kvs) + + return nil + } + + err = RunSTM(db.cli, apply) + assert.NoError(t, err) + + assert.Equal(t, "0", f.Get("ka")) + assert.Equal(t, "2", f.Get("kc")) + assert.Equal(t, "6", f.Get("kda")) + assert.Equal(t, "7", f.Get("kdb")) + assert.Equal(t, "9", f.Get("kdd")) + assert.Equal(t, "5", f.Get("kf")) + assert.Equal(t, "w", f.Get("w")) +} + +func TestCommitError(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + // Preset DB state. + f.Put("123", "xyz") + + // Count the number of applies. + cnt := 0 + + apply := func(stm STM) error { + // STM must have the key/value. + val, err := stm.Get("123") + assert.NoError(t, err) + + if cnt == 0 { + assert.Equal(t, []byte("xyz"), val) + + // Put a conflicting key/value during the first apply. 
+ f.Put("123", "def") + } + + // We'd expect to + stm.Put("123", "abc") + + cnt++ + return nil + } + + err = RunSTM(db.cli, apply) + assert.NoError(t, err) + assert.Equal(t, 2, cnt) + + assert.Equal(t, "abc", f.Get("123")) +} + +func TestManualTxError(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + assert.NoError(t, err) + + // Preset DB state. + f.Put("123", "xyz") + + stm := NewSTM(db.cli) + + val, err := stm.Get("123") + assert.NoError(t, err) + assert.Equal(t, []byte("xyz"), val) + + // Put a conflicting key/value. + f.Put("123", "def") + + // Should still get the original version. + val, err = stm.Get("123") + assert.NoError(t, err) + assert.Equal(t, []byte("xyz"), val) + + // Commit will fail with CommitError. + err = stm.Commit() + var e CommitError + assert.True(t, errors.As(err, &e)) + + // We expect that the transacton indeed did not commit. + assert.Equal(t, "def", f.Get("123")) +} diff --git a/channeldb/kvdb/etcd/walletdb_interface_test.go b/channeldb/kvdb/etcd/walletdb_interface_test.go new file mode 100644 index 0000000000..7d4ef023af --- /dev/null +++ b/channeldb/kvdb/etcd/walletdb_interface_test.go @@ -0,0 +1,17 @@ +// +build kvdb_etcd + +package etcd + +import ( + "testing" + + "github.com/btcsuite/btcwallet/walletdb/walletdbtest" +) + +// TestWalletDBInterface performs the WalletDB interface test suite for the +// etcd database driver. +func TestWalletDBInterface(t *testing.T) { + f := NewEtcdTestFixture(t) + defer f.Cleanup() + walletdbtest.TestInterface(t, dbType, f.BackendConfig()) +} diff --git a/channeldb/kvdb/interface.go b/channeldb/kvdb/interface.go index ec426410c7..46aded8c79 100644 --- a/channeldb/kvdb/interface.go +++ b/channeldb/kvdb/interface.go @@ -11,13 +11,24 @@ import ( // transaction is rolled back. If the rollback fails, the original error // returned by f is still returned. If the commit fails, the commit error is // returned. -var Update = walletdb.Update +func Update(db Backend, f func(tx RwTx) error) error { + if extendedDB, ok := db.(ExtendedBackend); ok { + return extendedDB.Update(f) + } + return walletdb.Update(db, f) +} // View opens a database read transaction and executes the function f with the // transaction passed as a parameter. After f exits, the transaction is rolled // back. If f errors, its error is returned, not a rollback error (if any // occur). -var View = walletdb.View +func View(db Backend, f func(tx ReadTx) error) error { + if extendedDB, ok := db.(ExtendedBackend); ok { + return extendedDB.View(f) + } + + return walletdb.View(db, f) +} // Batch is identical to the Update call, but it attempts to combine several // individual Update transactions into a single write database transaction on @@ -36,6 +47,29 @@ var Create = walletdb.Create // through read or read+write transactions. type Backend = walletdb.DB +// ExtendedBackend is and interface that supports View and Update and also able +// to collect database access patterns. +type ExtendedBackend interface { + Backend + + // PrintStats returns all collected stats pretty printed into a string. + PrintStats() string + + // View opens a database read transaction and executes the function f with + // the transaction passed as a parameter. After f exits, the transaction is + // rolled back. If f errors, its error is returned, not a rollback error + // (if any occur). 
+ View(f func(tx walletdb.ReadTx) error) error + + // Update opens a database read/write transaction and executes the function + // f with the transaction passed as a parameter. After f exits, if f did not + // error, the transaction is committed. Otherwise, if f did error, the + // transaction is rolled back. If the rollback fails, the original error + // returned by f is still returned. If the commit fails, the commit error is + // returned. + Update(f func(tx walletdb.ReadWriteTx) error) error +} + // Open opens an existing database for the specified type. The arguments are // specific to the database type driver. See the documentation for the database // driver for further details. diff --git a/channeldb/kvdb/kvdb_etcd.go b/channeldb/kvdb/kvdb_etcd.go new file mode 100644 index 0000000000..265e7daeb4 --- /dev/null +++ b/channeldb/kvdb/kvdb_etcd.go @@ -0,0 +1,49 @@ +// +build kvdb_etcd + +package kvdb + +import ( + "github.com/lightningnetwork/lnd/channeldb/kvdb/etcd" +) + +// TestBackend is conditionally set to etcd when the kvdb_etcd build tag is +// defined, allowing testing our database code with etcd backend. +const TestBackend = EtcdBackendName + +// GetEtcdBackend returns an etcd backend configured according to the +// passed etcdConfig. +func GetEtcdBackend(prefix string, etcdConfig *EtcdConfig) (Backend, error) { + // Config translation is needed here in order to keep the + // etcd package fully independent from the rest of the source tree. + backendConfig := etcd.BackendConfig{ + Host: etcdConfig.Host, + User: etcdConfig.User, + Pass: etcdConfig.Pass, + CertFile: etcdConfig.CertFile, + KeyFile: etcdConfig.KeyFile, + InsecureSkipVerify: etcdConfig.InsecureSkipVerify, + Prefix: prefix, + CollectCommitStats: etcdConfig.CollectStats, + } + + return Open(EtcdBackendName, backendConfig) +} + +// GetEtcdTestBackend creates an embedded etcd backend for testing +// storig the database at the passed path. +func GetEtcdTestBackend(path, name string) (Backend, func(), error) { + empty := func() {} + + config, cleanup, err := etcd.NewEmbeddedEtcdInstance(path) + if err != nil { + return nil, empty, err + } + + backend, err := Open(EtcdBackendName, *config) + if err != nil { + cleanup() + return nil, empty, err + } + + return backend, cleanup, nil +} diff --git a/channeldb/kvdb/kvdb_no_etcd.go b/channeldb/kvdb/kvdb_no_etcd.go new file mode 100644 index 0000000000..ea5de4275d --- /dev/null +++ b/channeldb/kvdb/kvdb_no_etcd.go @@ -0,0 +1,24 @@ +// +build !kvdb_etcd + +package kvdb + +import ( + "fmt" +) + +// TestBackend is conditionally set to bdb when the kvdb_etcd build tag is +// not defined, allowing testing our database code with bolt backend. +const TestBackend = BoltBackendName + +var errEtcdNotAvailable = fmt.Errorf("etcd backend not available") + +// GetEtcdBackend is a stub returning nil and errEtcdNotAvailable error. +func GetEtcdBackend(prefix string, etcdConfig *EtcdConfig) (Backend, error) { + return nil, errEtcdNotAvailable +} + +// GetTestEtcdBackend is a stub returning nil, an empty closure and an +// errEtcdNotAvailable error. 
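// Both build variants expose the same helpers, which lets calling code select
// a backend purely from configuration. A hedged sketch of that wiring (the
// prefix, file name and boolean flag are illustrative values, not the actual
// lnd configuration fields):
func selectBackendSketch(useEtcd bool, etcdCfg *EtcdConfig, dir string) (Backend, error) {
	if useEtcd {
		// Routed to the real etcd driver only when the kvdb_etcd
		// build tag is set; otherwise the stub above reports that
		// etcd support is unavailable.
		return GetEtcdBackend("channeldb-prefix", etcdCfg)
	}

	// Fall back to the default bolt-backed database file.
	return GetBoltBackend(dir, "channel.db", true)
}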
+func GetEtcdTestBackend(path, name string) (Backend, func(), error) { + return nil, func() {}, errEtcdNotAvailable +} diff --git a/channeldb/meta_test.go b/channeldb/meta_test.go index a18932f477..9480112527 100644 --- a/channeldb/meta_test.go +++ b/channeldb/meta_test.go @@ -3,6 +3,7 @@ package channeldb import ( "bytes" "io/ioutil" + "os" "testing" "github.com/go-errors/errors" @@ -421,12 +422,21 @@ func TestMigrationReversion(t *testing.T) { t.Parallel() tempDirName, err := ioutil.TempDir("", "channeldb") + defer func() { + os.RemoveAll(tempDirName) + }() if err != nil { t.Fatalf("unable to create temp dir: %v", err) } - cdb, err := Open(tempDirName) + backend, cleanup, err := kvdb.GetTestBackend(tempDirName, "cdb") + if err != nil { + t.Fatalf("unable to get test db backend: %v", err) + } + + cdb, err := CreateWithBackend(backend) if err != nil { + cleanup() t.Fatalf("unable to open channeldb: %v", err) } @@ -442,12 +452,19 @@ func TestMigrationReversion(t *testing.T) { // Close the database. Even if we succeeded, our next step is to reopen. cdb.Close() + cleanup() if err != nil { t.Fatalf("unable to increase db version: %v", err) } - _, err = Open(tempDirName) + backend, cleanup, err = kvdb.GetTestBackend(tempDirName, "cdb") + if err != nil { + t.Fatalf("unable to get test db backend: %v", err) + } + defer cleanup() + + _, err = CreateWithBackend(backend) if err != ErrDBReversion { t.Fatalf("unexpected error when opening channeldb, "+ "want: %v, got: %v", ErrDBReversion, err) diff --git a/channeldb/payment_control_test.go b/channeldb/payment_control_test.go index 030c13250d..c470a8f5f1 100644 --- a/channeldb/payment_control_test.go +++ b/channeldb/payment_control_test.go @@ -5,7 +5,6 @@ import ( "crypto/sha256" "fmt" "io" - "io/ioutil" "reflect" "testing" "time" @@ -15,20 +14,6 @@ import ( "github.com/lightningnetwork/lnd/record" ) -func initDB() (*DB, error) { - tempPath, err := ioutil.TempDir("", "switchdb") - if err != nil { - return nil, err - } - - db, err := Open(tempPath) - if err != nil { - return nil, err - } - - return db, err -} - func genPreimage() ([32]byte, error) { var preimage [32]byte if _, err := io.ReadFull(rand.Reader, preimage[:]); err != nil { @@ -66,7 +51,8 @@ func genInfo() (*PaymentCreationInfo, *HTLCAttemptInfo, func TestPaymentControlSwitchFail(t *testing.T) { t.Parallel() - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() if err != nil { t.Fatalf("unable to init db: %v", err) } @@ -202,7 +188,9 @@ func TestPaymentControlSwitchFail(t *testing.T) { func TestPaymentControlSwitchDoubleSend(t *testing.T) { t.Parallel() - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() + if err != nil { t.Fatalf("unable to init db: %v", err) } @@ -282,7 +270,9 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) { t.Parallel() - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() + if err != nil { t.Fatalf("unable to init db: %v", err) } @@ -313,7 +303,9 @@ func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) { func TestPaymentControlFailsWithoutInFlight(t *testing.T) { t.Parallel() - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() + if err != nil { t.Fatalf("unable to init db: %v", err) } @@ -339,7 +331,9 @@ func TestPaymentControlFailsWithoutInFlight(t *testing.T) { func TestPaymentControlDeleteNonInFligt(t *testing.T) { t.Parallel() - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() 
+ if err != nil { t.Fatalf("unable to init db: %v", err) } @@ -481,7 +475,9 @@ func TestPaymentControlMultiShard(t *testing.T) { } runSubTest := func(t *testing.T, test testCase) { - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() + if err != nil { t.Fatalf("unable to init db: %v", err) } @@ -728,7 +724,9 @@ func TestPaymentControlMultiShard(t *testing.T) { func TestPaymentControlMPPRecordValidation(t *testing.T) { t.Parallel() - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() + if err != nil { t.Fatalf("unable to init db: %v", err) } diff --git a/channeldb/payments_test.go b/channeldb/payments_test.go index b522872260..2f0d88bcd9 100644 --- a/channeldb/payments_test.go +++ b/channeldb/payments_test.go @@ -351,7 +351,9 @@ func TestQueryPayments(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - db, err := initDB() + db, cleanup, err := makeTestDB() + defer cleanup() + if err != nil { t.Fatalf("unable to init db: %v", err) } diff --git a/config.go b/config.go index 3fe0059f78..37c0c63a36 100644 --- a/config.go +++ b/config.go @@ -248,6 +248,8 @@ type Config struct { AllowCircularRoute bool `long:"allow-circular-route" description:"If true, our node will allow htlc forwards that arrive and depart on the same channel."` + DB *lncfg.DB `group:"db" namespace:"db"` + // registeredChains keeps track of all chains that have been registered // with the daemon. registeredChains *chainRegistry @@ -358,6 +360,7 @@ func DefaultConfig() Config { }, MaxOutgoingCltvExpiry: htlcswitch.DefaultMaxOutgoingCltvExpiry, MaxChannelFeeAllocation: htlcswitch.DefaultMaxLinkFeeAllocation, + DB: lncfg.DefaultDB(), registeredChains: newChainRegistry(), } } @@ -1073,6 +1076,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { cfg.Workers, cfg.Caches, cfg.WtClient, + cfg.DB, ) if err != nil { return nil, err @@ -1090,6 +1094,18 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { return &cfg, err } +// localDatabaseDir returns the default directory where the +// local bolt db files are stored. +func (c *Config) localDatabaseDir() string { + return filepath.Join(c.DataDir, + defaultGraphSubDirname, + normalizeNetwork(activeNetParams.Name)) +} + +func (c *Config) networkName() string { + return normalizeNetwork(activeNetParams.Name) +} + // CleanAndExpandPath expands environment variables and leading ~ in the // passed path, cleans the result, and returns it. 
// This function is taken from https://github.com/btcsuite/btcd diff --git a/go.mod b/go.mod index 1c4c1ca063..d6674b8e37 100644 --- a/go.mod +++ b/go.mod @@ -14,10 +14,18 @@ require ( github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 github.com/btcsuite/btcwallet/walletdb v1.3.1 github.com/btcsuite/btcwallet/wtxmgr v1.1.1-0.20200515224913-e0e62245ecbe + github.com/coreos/etcd v3.3.18+incompatible + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-00010101000000-000000000000 // indirect + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/davecgh/go-spew v1.1.1 + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/go-errors/errors v1.0.1 github.com/go-openapi/strfmt v0.19.5 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/protobuf v1.3.1 + github.com/google/btree v1.0.0 // indirect + github.com/gorilla/websocket v1.4.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.8.6 @@ -25,7 +33,9 @@ require ( github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad github.com/jedib0t/go-pretty v4.3.0+incompatible github.com/jessevdk/go-flags v1.4.0 + github.com/jonboulle/clockwork v0.1.0 // indirect github.com/jrick/logrotate v1.0.0 + github.com/json-iterator/go v1.1.9 // indirect github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c // indirect github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 // indirect @@ -43,12 +53,19 @@ require ( github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 github.com/mattn/go-runewidth v0.0.9 // indirect github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8 + github.com/modern-go/reflect2 v1.0.1 // indirect github.com/prometheus/client_golang v0.9.3 github.com/rogpeppe/fastuuid v1.2.0 // indirect + github.com/soheilhy/cmux v0.1.4 // indirect + github.com/stretchr/testify v1.4.0 + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 github.com/urfave/cli v1.18.0 + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect + go.etcd.io/bbolt v1.3.3 + go.uber.org/zap v1.14.1 // indirect golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 - golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20200116001909-b77594299b42 // indirect golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 @@ -57,6 +74,7 @@ require ( gopkg.in/macaroon-bakery.v2 v2.0.1 gopkg.in/macaroon.v2 v2.0.0 gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect ) replace github.com/lightningnetwork/lnd/ticker => ./ticker @@ -71,4 +89,6 @@ replace git.schwanenlied.me/yawning/bsaes.git => github.com/Yawning/bsaes v0.0.0 // btcsuite/btcutil package requests a newer version. 
 replace golang.org/x/crypto => golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67
 
+replace github.com/coreos/go-systemd => github.com/coreos/go-systemd/v22 v22.0.0
+
 go 1.12
diff --git a/go.sum b/go.sum
index eb42ab16f6..3ab92e2a65 100644
--- a/go.sum
+++ b/go.sum
@@ -44,7 +44,6 @@ github.com/btcsuite/btcwallet/wallet/txrules v1.0.0/go.mod h1:UwQE78yCerZ313EXZw
 github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0 h1:6DxkcoMnCPY4E9cUDPB5tbuuf40SmmMkSQkoE8vCT+s=
 github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs=
 github.com/btcsuite/btcwallet/walletdb v1.0.0/go.mod h1:bZTy9RyYZh9fLnSua+/CD48TJtYJSHjjYcSaszuxCCk=
-github.com/btcsuite/btcwallet/walletdb v1.2.0 h1:E0+M4jHOToAvGWZ27ew5AaDAHDi6fUiXkjUJUnoEOD0=
 github.com/btcsuite/btcwallet/walletdb v1.2.0/go.mod h1:9cwc1Yyg4uvd4ZdfdoMnALji+V9gfWSMfxEdLdR5Vwc=
 github.com/btcsuite/btcwallet/walletdb v1.3.1 h1:lW1Ac3F1jJY4K11P+YQtRNcP5jFk27ASfrV7C6mvRU0=
 github.com/btcsuite/btcwallet/walletdb v1.3.1/go.mod h1:9cwc1Yyg4uvd4ZdfdoMnALji+V9gfWSMfxEdLdR5Vwc=
@@ -70,10 +69,20 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
 github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.18+incompatible h1:Zz1aXgDrFFi1nadh58tA9ktt06cmPTwNNP3dXwIq1lE=
+github.com/coreos/etcd v3.3.18+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/frankban/quicktest v1.2.2 h1:xfmOhhoH5fGPgbEAlhLpJH9p0z/0Qizio9osmvn9IUY=
 github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
@@ -91,20 +100,31 @@ github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9w
 github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
 github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42 h1:q3pnF5JFBNRz8sRD+IRj7Y6DMyYGTNqnZ9axTbSfoNI=
 github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
@@ -122,8 +142,12 @@ github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4Fw
 github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
+github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A=
 github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c/go.mod h1:nD0vlnrUjcjJhqN5WuCWZyzfd5AHZAC9/ajvbSx69xA=
 github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d h1:hJXjZMxj0SWlMoQkzeZDLi2cmeiWKa7y1B8Rg+qaoEc=
@@ -170,6 +194,11 @@ github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8 h1:PRMAcldsl4mXKJeRNB/KV
 github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -179,6 +208,7 @@ github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5
 github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -197,30 +227,50 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 h1:tcJ6OjwOMvExLlzrAVZute09ocAGa7KqOON60++Gz4E=
 github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02/go.mod h1:tHlrkM198S068ZqfrO6S8HsoJq2bF3ETfTL+kt4tInY=
 github.com/urfave/cli v1.18.0 h1:m9MfmZWX7bwr9kUcs/Asr95j0IVXzGNNc+/5ku2m26Q=
 github.com/urfave/cli v1.18.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU=
 go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
+go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
 golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE=
 golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529 h1:iMGN4xG0cnqj3t+zOM8wUB0BiPKHEwSxEZCvzcbZuvk=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -229,10 +279,15 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
 golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -241,6 +296,8 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
 golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
@@ -252,6 +309,11 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2I
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8=
@@ -267,6 +329,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso=
 gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/macaroon-bakery.v2 v2.0.1 h1:0N1TlEdfLP4HXNCg7MQUMp5XwvOoxk+oe9Owr2cpvsc=
@@ -284,3 +347,6 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/lncfg/db.go b/lncfg/db.go
new file mode 100644
index 0000000000..a265f95f14
--- /dev/null
+++ b/lncfg/db.go
@@ -0,0 +1,65 @@
+package lncfg
+
+import (
+	"fmt"
+
+	"github.com/lightningnetwork/lnd/channeldb/kvdb"
+)
+
+const (
+	dbName      = "channel.db"
+	boltBackend = "bolt"
+	etcdBackend = "etcd"
+)
+
+// DB holds database configuration for LND.
+type DB struct {
+	Backend string `long:"backend" description:"The selected database backend."`
+
+	Etcd *kvdb.EtcdConfig `group:"etcd" namespace:"etcd" description:"Etcd settings."`
+
+	Bolt *kvdb.BoltConfig `group:"bolt" namespace:"bolt" description:"Bolt settings."`
+}
+
+// DefaultDB creates and returns a new default DB config.
+func DefaultDB() *DB {
+	return &DB{
+		Backend: boltBackend,
+		Bolt: &kvdb.BoltConfig{
+			NoFreeListSync: true,
+		},
+	}
+}
+
+// Validate validates the DB config.
+func (db *DB) Validate() error {
+	switch db.Backend {
+	case boltBackend:
+
+	case etcdBackend:
+		if db.Etcd.Host == "" {
+			return fmt.Errorf("etcd host must be set")
+		}
+
+	default:
+		return fmt.Errorf("unknown backend, must be either \"%v\" or \"%v\"",
+			boltBackend, etcdBackend)
+	}
+
+	return nil
+}
+
+// GetBackend returns a kvdb.Backend as set in the DB config.
+func (db *DB) GetBackend(dbPath string, networkName string) (
+	kvdb.Backend, error) {
+
+	if db.Backend == etcdBackend {
+		// Prefix will separate key/values in the db.
+		return kvdb.GetEtcdBackend(networkName, db.Etcd)
+	}
+
+	return kvdb.GetBoltBackend(dbPath, dbName, db.Bolt.NoFreeListSync)
+}
+
+// Compile-time constraint to ensure DB implements the Validator interface.
+var _ Validator = (*DB)(nil)
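A short usage sketch of the new lncfg.DB type (not part of the patch). It assumes kvdb.EtcdConfig exposes at least the Host field that Validate checks, and the host and directory values are placeholders:

// Illustration only: openEtcdChanDBBackend is a hypothetical helper.
func openEtcdChanDBBackend() (kvdb.Backend, error) {
	dbCfg := lncfg.DefaultDB() // defaults to bolt with NoFreeListSync
	dbCfg.Backend = "etcd"
	dbCfg.Etcd = &kvdb.EtcdConfig{Host: "127.0.0.1:2379"}

	// Validate rejects an etcd selection without a host.
	if err := dbCfg.Validate(); err != nil {
		return nil, err
	}

	// The dbPath argument is only used by the bolt backend; for etcd the
	// network name is passed through as the key prefix.
	return dbCfg.GetBackend("/tmp/lnd/graph/mainnet", "mainnet")
}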
diff --git a/lnd.go b/lnd.go
index 578f684556..a58d5cbd23 100644
--- a/lnd.go
+++ b/lnd.go
@@ -247,18 +247,22 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error {
 	}
 
 	// Create the network-segmented directory for the channel database.
-	graphDir := filepath.Join(cfg.DataDir,
-		defaultGraphSubDirname,
-		normalizeNetwork(activeNetParams.Name))
-
 	ltndLog.Infof("Opening the main database, this might take a few " +
 		"minutes...")
 
+	chanDbBackend, err := cfg.DB.GetBackend(
+		cfg.localDatabaseDir(), cfg.networkName(),
+	)
+	if err != nil {
+		ltndLog.Error(err)
+		return err
+	}
+
 	// Open the channeldb, which is dedicated to storing channel, and
 	// network related metadata.
 	startOpenTime := time.Now()
-	chanDB, err := channeldb.Open(
-		graphDir,
+	chanDB, err := channeldb.CreateWithBackend(
+		chanDbBackend,
 		channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
 		channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
 		channeldb.OptionSetSyncFreelist(cfg.SyncFreelist),
@@ -493,7 +497,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error {
 	var towerClientDB *wtdb.ClientDB
 	if cfg.WtClient.Active {
 		var err error
-		towerClientDB, err = wtdb.OpenClientDB(graphDir)
+		towerClientDB, err = wtdb.OpenClientDB(cfg.localDatabaseDir())
 		if err != nil {
 			err := fmt.Errorf("unable to open watchtower client "+
 				"database: %v", err)
diff --git a/server.go b/server.go
index b0f00c5a47..1f3b196fbd 100644
--- a/server.go
+++ b/server.go
@@ -377,8 +377,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, chanDB *channeldb.DB,
 	// Initialize the sphinx router, placing it's persistent replay log in
 	// the same directory as the channel graph database.
-	graphDir := chanDB.Path()
-	sharedSecretPath := filepath.Join(graphDir, "sphinxreplay.db")
+	sharedSecretPath := filepath.Join(cfg.localDatabaseDir(), "sphinxreplay.db")
 	replayLog := htlcswitch.NewDecayedLog(sharedSecretPath, cc.chainNotifier)
 	sphinxRouter := sphinx.NewRouter(
 		nodeKeyECDH, activeNetParams.Params, replayLog,
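Finally, a condensed sketch (not part of the patch, with the option value chosen as a placeholder) of how a backend obtained from the config is handed to channeldb, mirroring the lnd.go hunk above:

// Illustration only: the same call now serves bolt and etcd backends,
// since channeldb takes an already-constructed kvdb.Backend rather than
// a directory path.
func openChanDB(backend kvdb.Backend, syncFreelist bool) (*channeldb.DB, error) {
	return channeldb.CreateWithBackend(
		backend,
		channeldb.OptionSetSyncFreelist(syncFreelist),
	)
}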