Skip to content

Commit

Permalink
Merge pull request #1273 from dgraph-io/ibrahim/release/v2.0-cherry-pick-v2.0.3
Browse files Browse the repository at this point in the history

Cherry-pick commits for v2.0.3
  • Loading branch information
Ibrahim Jarif authored Mar 26, 2020
2 parents 8fd5740 + b551699 commit b5dea5b
Show file tree
Hide file tree
Showing 21 changed files with 517 additions and 208 deletions.
8 changes: 3 additions & 5 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,7 @@ notifications:
slack:
secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk=

before_script:
- go get github.com/mattn/goveralls
script:
- bash contrib/cover.sh $HOME/build coverage.out || travis_terminate 1
- goveralls -service=travis-ci -coverprofile=coverage.out || true
- goveralls -coverprofile=coverage.out -service=travis-ci
- go test -v ./...
# Another round of tests after turning off mmap
- go test -v -vlog_mmap=false github.com/dgraph-io/badger
29 changes: 28 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,32 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Serialization Versioning](VERSIONING.md).

## [2.0.2] - 2020-02-26
## [2.0.3] - 2020-03-24

### Fixed

- Add support for watching nil prefix in subscribe API (#1246)

### Performance

- Compress/Encrypt Blocks in the background (#1227)
- Disable cache by default (#1257)

### Features

- Add BypassDirLock option (#1243)
- Add separate cache for bloomfilters (#1260)

### New APIs
- badger.DB
- BfCacheMetrics (#1260)
- DataCacheMetrics (#1260)
- badger.Options
- WithBypassLockGuard (#1243)
- WithLoadBloomsOnOpen (#1260)
- WithMaxBfCacheSize (#1260)

## [2.0.2] - 2020-03-02

### Fixed

Expand Down Expand Up @@ -293,6 +318,8 @@ Bug fix:
## [1.0.1] - 2017-11-06
* Fix an uint16 overflow when resizing key slice

[Unreleased]: https://github.com/dgraph-io/badger/compare/v2.0.3...HEAD
[2.0.3]: https://github.com/dgraph-io/badger/compare/v2.0.2...v2.0.3
[2.0.2]: https://github.com/dgraph-io/badger/compare/v2.0.1...v2.0.2
[2.0.1]: https://github.com/dgraph-io/badger/compare/v2.0.0...v2.0.1
[2.0.0]: https://github.com/dgraph-io/badger/compare/v1.6.0...v2.0.0
Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -776,6 +776,7 @@ Below is a list of known projects that use Badger:
* [Volument](https://volument.com/) - A new take on website analytics backed by Badger.
* [Sloop](https://github.com/salesforce/sloop) - Kubernetes History Visualization.
* [KVdb](https://kvdb.io/) - Hosted key-value store and serverless platform built on top of Badger.
* [Dkron](https://dkron.io/) - Distributed, fault tolerant job scheduling system.

If you are using Badger in a project please send a pull request to add it to the list.

Expand Down
24 changes: 0 additions & 24 deletions contrib/cover.sh

This file was deleted.

88 changes: 59 additions & 29 deletions db.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ type DB struct {
pub *publisher
registry *KeyRegistry
blockCache *ristretto.Cache
bfCache *ristretto.Cache
}

const (
Expand Down Expand Up @@ -234,33 +235,35 @@ func Open(opt Options) (db *DB, err error) {
if err := createDirs(opt); err != nil {
return nil, err
}
dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
if err != nil {
return nil, err
}
defer func() {
if dirLockGuard != nil {
_ = dirLockGuard.release()
}
}()
absDir, err := filepath.Abs(opt.Dir)
if err != nil {
return nil, err
}
absValueDir, err := filepath.Abs(opt.ValueDir)
if err != nil {
return nil, err
}
if absValueDir != absDir {
valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
if !opt.BypassLockGuard {
dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
if err != nil {
return nil, err
}
defer func() {
if valueDirLockGuard != nil {
_ = valueDirLockGuard.release()
if dirLockGuard != nil {
_ = dirLockGuard.release()
}
}()
absDir, err := filepath.Abs(opt.Dir)
if err != nil {
return nil, err
}
absValueDir, err := filepath.Abs(opt.ValueDir)
if err != nil {
return nil, err
}
if absValueDir != absDir {
valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
if err != nil {
return nil, err
}
defer func() {
if valueDirLockGuard != nil {
_ = valueDirLockGuard.release()
}
}()
}
}
}

Expand Down Expand Up @@ -302,13 +305,28 @@ func Open(opt Options) (db *DB, err error) {
}
db.blockCache, err = ristretto.NewCache(&config)
if err != nil {
return nil, errors.Wrap(err, "failed to create cache")
return nil, errors.Wrap(err, "failed to create data cache")
}
}

if opt.MaxBfCacheSize > 0 {
config := ristretto.Config{
// Use 5% of cache memory for storing counters.
NumCounters: int64(float64(opt.MaxBfCacheSize) * 0.05 * 2),
MaxCost: int64(float64(opt.MaxBfCacheSize) * 0.95),
BufferItems: 64,
Metrics: true,
}
db.bfCache, err = ristretto.NewCache(&config)
if err != nil {
return nil, errors.Wrap(err, "failed to create bf cache")
}
}

if db.opt.InMemory {
db.opt.SyncWrites = false
db.opt.ValueThreshold = maxValueThreshold
// If badger is running in memory mode, push everything into the LSM Tree.
db.opt.ValueThreshold = math.MaxInt32
}
krOpt := KeyRegistryOptions{
ReadOnly: opt.ReadOnly,
Expand All @@ -331,6 +349,9 @@ func Open(opt Options) (db *DB, err error) {
return nil, err
}

// Initialize vlog struct.
db.vlog.init(db)

if !opt.ReadOnly {
db.closers.compactors = y.NewCloser(1)
db.lc.startCompact(db.closers.compactors)
Expand Down Expand Up @@ -387,14 +408,22 @@ func Open(opt Options) (db *DB, err error) {
return db, nil
}

// CacheMetrics returns the metrics for the underlying cache.
func (db *DB) CacheMetrics() *ristretto.Metrics {
// DataCacheMetrics returns the metrics for the underlying data cache.
func (db *DB) DataCacheMetrics() *ristretto.Metrics {
if db.blockCache != nil {
return db.blockCache.Metrics
}
return nil
}

// BfCacheMetrics returns the metrics for the underlying bloom filter cache.
func (db *DB) BfCacheMetrics() *ristretto.Metrics {
if db.bfCache != nil {
return db.bfCache.Metrics
}
return nil
}

// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to
// disk. Calling DB.Close() multiple times would still only close the DB once.
func (db *DB) Close() error {
Expand Down Expand Up @@ -484,6 +513,7 @@ func (db *DB) close() (err error) {
db.closers.updateSize.SignalAndWait()
db.orc.Stop()
db.blockCache.Close()
db.bfCache.Close()

db.elog.Finish()
if db.opt.InMemory {
Expand Down Expand Up @@ -951,6 +981,7 @@ func (db *DB) handleFlushTask(ft flushTask) error {
bopts.DataKey = dk
// Builder does not need cache but the same options are used for opening table.
bopts.Cache = db.blockCache
bopts.BfCache = db.bfCache
tableData := buildL0Table(ft, bopts)

fileID := db.lc.reserveFileID()
Expand Down Expand Up @@ -1509,6 +1540,7 @@ func (db *DB) dropAll() (func(), error) {
db.lc.nextFileID = 1
db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
db.blockCache.Clear()
db.bfCache.Clear()

return resume, nil
}
Expand Down Expand Up @@ -1576,9 +1608,7 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes
if cb == nil {
return ErrNilCallback
}
if len(prefixes) == 0 {
return ErrNoPrefixes
}

c := y.NewCloser(1)
recvCh, id := db.pub.newSubscriber(c, prefixes...)
slurp := func(batch *pb.KVList) error {
Expand Down Expand Up @@ -1612,7 +1642,7 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes
err := slurp(batch)
if err != nil {
c.Done()
// Delete the subsriber if there is an error by the callback.
// Delete the subscriber if there is an error by the callback.
db.pub.deleteSubscriber(id)
return err
}
Expand Down
9 changes: 3 additions & 6 deletions db2_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -669,16 +669,13 @@ func TestL0GCBug(t *testing.T) {
// Simulate a crash by not closing db1 but releasing the locks.
if db1.dirLockGuard != nil {
require.NoError(t, db1.dirLockGuard.release())
db1.dirLockGuard = nil
}
if db1.valueDirGuard != nil {
require.NoError(t, db1.valueDirGuard.release())
db1.valueDirGuard = nil
}
for _, f := range db1.vlog.filesMap {
require.NoError(t, f.fd.Close())
}
require.NoError(t, db1.registry.Close())
require.NoError(t, db1.lc.close())
require.NoError(t, db1.manifest.close())
require.NoError(t, db1.Close())

db2, err := Open(opts)
require.NoError(t, err)
Expand Down
3 changes: 0 additions & 3 deletions errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,9 +110,6 @@ var (
// ErrNilCallback is returned when subscriber's callback is nil.
ErrNilCallback = errors.New("Callback cannot be nil")

// ErrNoPrefixes is returned when subscriber doesn't provide any prefix.
ErrNoPrefixes = errors.New("At least one key prefix is required")

// ErrEncryptionKeyMismatch is returned when the storage key is not
// matched with the key previously given.
ErrEncryptionKeyMismatch = errors.New("Encryption key mismatch")
Expand Down
2 changes: 2 additions & 0 deletions levels.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,7 @@ func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) {
topt.Compression = tf.Compression
topt.DataKey = dk
topt.Cache = db.blockCache
topt.BfCache = db.bfCache
t, err := table.OpenTable(fd, topt)
if err != nil {
if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") {
Expand Down Expand Up @@ -530,6 +531,7 @@ func (s *levelsController) compactBuildTables(
bopts.DataKey = dk
// Builder does not need cache but the same options are used for opening table.
bopts.Cache = s.kv.blockCache
bopts.BfCache = s.kv.bfCache
builder := table.NewTableBuilder(bopts)
var numKeys, numSkips uint64
for ; it.Valid(); it.Next() {
Expand Down
Loading

0 comments on commit b5dea5b

Please sign in to comment.