diff --git a/mvcc/index.go b/mvcc/index.go
index 53b3ebc66b3..13a350c19ce 100644
--- a/mvcc/index.go
+++ b/mvcc/index.go
@@ -85,6 +85,21 @@ func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
 	return nil
 }
 
+func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex)) {
+	keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
+
+	ti.RLock()
+	defer ti.RUnlock()
+
+	ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+		if len(endi.key) > 0 && !item.Less(endi) {
+			return false
+		}
+		f(item.(*keyIndex))
+		return true
+	})
+}
+
 func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
 	if end == nil {
 		rev, _, _, err := ti.Get(key, atRev)
@@ -93,8 +108,12 @@ func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
 		}
 		return []revision{rev}
 	}
-	_, rev := ti.Range(key, end, atRev)
-	return rev
+	ti.visit(key, end, func(ki *keyIndex) {
+		if rev, _, _, err := ki.get(atRev); err == nil {
+			revs = append(revs, rev)
+		}
+	})
+	return revs
 }
 
 func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
@@ -105,27 +124,12 @@ func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []
 		}
 		return [][]byte{key}, []revision{rev}
 	}
-
-	keyi := &keyIndex{key: key}
-	endi := &keyIndex{key: end}
-
-	ti.RLock()
-	defer ti.RUnlock()
-
-	ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
-		if len(endi.key) > 0 && !item.Less(endi) {
-			return false
+	ti.visit(key, end, func(ki *keyIndex) {
+		if rev, _, _, err := ki.get(atRev); err == nil {
+			revs = append(revs, rev)
+			keys = append(keys, ki.key)
 		}
-		curKeyi := item.(*keyIndex)
-		rev, _, _, err := curKeyi.get(atRev)
-		if err != nil {
-			return true
-		}
-		revs = append(revs, rev)
-		keys = append(keys, curKeyi.key)
-		return true
 	})
-
 	return keys, revs
 }
 
diff --git a/mvcc/kvstore_bench_test.go b/mvcc/kvstore_bench_test.go
index 92f3d6d507c..a64a3c5a57b 100644
--- a/mvcc/kvstore_bench_test.go
+++ b/mvcc/kvstore_bench_test.go
@@ -45,22 +45,34 @@ func BenchmarkStorePut(b *testing.B) {
 	}
 }
 
-func BenchmarkStoreRangeOneKey(b *testing.B) {
+func BenchmarkStoreRangeKey1(b *testing.B)   { benchmarkStoreRange(b, 1) }
+func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
+
+func benchmarkStoreRange(b *testing.B, n int) {
 	var i fakeConsistentIndex
 	be, tmpPath := backend.NewDefaultTmpBackend()
 	s := NewStore(be, &lease.FakeLessor{}, &i)
 	defer cleanup(s, be, tmpPath)
 
 	// 64 byte key/val
-	key, val := createBytesSlice(64, 1), createBytesSlice(64, 1)
-	s.Put(key[0], val[0], lease.NoLease)
+	keys, val := createBytesSlice(64, n), createBytesSlice(64, 1)
+	for i := range keys {
+		s.Put(keys[i], val[0], lease.NoLease)
+	}
 
 	// Force into boltdb tx instead of backend read tx.
 	s.Commit()
 
+	var begin, end []byte
+	if n == 1 {
+		begin, end = keys[0], nil
+	} else {
+		begin, end = []byte{}, []byte{}
+	}
+
+	b.ReportAllocs()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		s.Range(key[0], nil, RangeOptions{})
+		s.Range(begin, end, RangeOptions{})
 	}
 }
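Note on the index.go change: the patch extracts the B-tree range scan that Revisions and Range each spelled out into a shared visit helper, so Revisions no longer materializes the keys slice it was throwing away. The sketch below is a minimal, self-contained illustration of the same visitor pattern over google/btree (v1, non-generic API); the kv type and free-standing visit function are hypothetical stand-ins for etcd's unexported keyIndex and treeIndex.visit, and the RWMutex from the real code is omitted.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/btree"
)

// kv is a stand-in for mvcc's keyIndex: items ordered by key bytes.
type kv struct{ key []byte }

// Less implements btree.Item.
func (a *kv) Less(b btree.Item) bool { return bytes.Compare(a.key, b.(*kv).key) < 0 }

// visit applies f to every item in the half-open interval [key, end),
// mirroring the treeIndex.visit helper from the patch (locking omitted).
func visit(t *btree.BTree, key, end []byte, f func(*kv)) {
	keyi, endi := &kv{key: key}, &kv{key: end}
	t.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
		// An empty end means "no upper bound"; otherwise stop at the
		// first item that is not strictly below end.
		if len(endi.key) > 0 && !item.Less(endi) {
			return false
		}
		f(item.(*kv))
		return true // continue the ascent
	})
}

func main() {
	t := btree.New(32)
	for _, k := range []string{"a", "b", "c", "d"} {
		t.ReplaceOrInsert(&kv{key: []byte(k)})
	}
	// The callback decides what to accumulate; this is how Revisions
	// (revisions only) and Range (keys and revisions) share one scan.
	var got []string
	visit(t, []byte("b"), []byte("d"), func(i *kv) { got = append(got, string(i.key)) })
	fmt.Println(got) // [b c]
}
```

Keeping the iteration and the termination check in one place means a future change to the bound semantics only has to be made once, and callers differ only in what their closures append.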
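Note on the benchmark change: benchmarkStoreRange parameterizes the old single-key benchmark over the key count, so the refactored path is exercised both as a point lookup (n == 1, begin set to the key and end nil) and as an unbounded scan over all n keys (empty begin and end, which the visit helper treats as "no upper bound"). The added b.ReportAllocs() makes allocation counts appear for these benchmarks even without the -benchmem flag. Assuming a standard Go toolchain run from the repository root, something like `go test ./mvcc -run '^$' -bench StoreRangeKey` should run just these benchmarks.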