Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Prevent segfaults: defer closing until all operations are done #612

Merged
merged 1 commit into from
Apr 23, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 12 additions & 13 deletions binding.cc
Original file line number Diff line number Diff line change
Expand Up @@ -454,7 +454,6 @@ static void FinalizeDatabase (napi_env env, void* data, void* hint) {

/**
* Base worker class for doing async work that defers closing the database.
* @TODO Use this for Get-, Del-, Batch-, BatchWrite-, ApproximateSize- and CompactRangeWorker too.
*/
struct PriorityWorker : public BaseWorker {
PriorityWorker (napi_env env, Database* database, napi_value callback, const char* resourceName)
Expand Down Expand Up @@ -929,14 +928,14 @@ NAPI_METHOD(db_put) {
/**
* Worker class for getting a value from a database.
*/
struct GetWorker final : public BaseWorker {
struct GetWorker final : public PriorityWorker {
GetWorker (napi_env env,
Database* database,
napi_value callback,
leveldb::Slice key,
bool asBuffer,
bool fillCache)
: BaseWorker(env, database, callback, "leveldown.db.get"),
: PriorityWorker(env, database, callback, "leveldown.db.get"),
key_(key),
asBuffer_(asBuffer) {
options_.fill_cache = fillCache;
Expand Down Expand Up @@ -994,13 +993,13 @@ NAPI_METHOD(db_get) {
/**
* Worker class for deleting a value from a database.
*/
struct DelWorker final : public BaseWorker {
struct DelWorker final : public PriorityWorker {
DelWorker (napi_env env,
Database* database,
napi_value callback,
leveldb::Slice key,
bool sync)
: BaseWorker(env, database, callback, "leveldown.db.del"),
: PriorityWorker(env, database, callback, "leveldown.db.del"),
key_(key) {
options_.sync = sync;
}
Expand Down Expand Up @@ -1037,13 +1036,13 @@ NAPI_METHOD(db_del) {
/**
* Worker class for calculating the size of a range.
*/
struct ApproximateSizeWorker final : public BaseWorker {
struct ApproximateSizeWorker final : public PriorityWorker {
ApproximateSizeWorker (napi_env env,
Database* database,
napi_value callback,
leveldb::Slice start,
leveldb::Slice end)
: BaseWorker(env, database, callback, "leveldown.db.approximate_size"),
: PriorityWorker(env, database, callback, "leveldown.db.approximate_size"),
start_(start), end_(end) {}

~ApproximateSizeWorker () {
Expand Down Expand Up @@ -1093,13 +1092,13 @@ NAPI_METHOD(db_approximate_size) {
/**
* Worker class for compacting a range in a database.
*/
struct CompactRangeWorker final : public BaseWorker {
struct CompactRangeWorker final : public PriorityWorker {
CompactRangeWorker (napi_env env,
Database* database,
napi_value callback,
leveldb::Slice start,
leveldb::Slice end)
: BaseWorker(env, database, callback, "leveldown.db.compact_range"),
: PriorityWorker(env, database, callback, "leveldown.db.compact_range"),
start_(start), end_(end) {}

~CompactRangeWorker () {
Expand Down Expand Up @@ -1579,13 +1578,13 @@ NAPI_METHOD(iterator_next) {
/**
* Worker class for batch write operation.
*/
struct BatchWorker final : public BaseWorker {
struct BatchWorker final : public PriorityWorker {
BatchWorker (napi_env env,
Database* database,
napi_value callback,
leveldb::WriteBatch* batch,
bool sync)
: BaseWorker(env, database, callback, "leveldown.batch.do"),
: PriorityWorker(env, database, callback, "leveldown.batch.do"),
batch_(batch) {
options_.sync = sync;
}
Expand Down Expand Up @@ -1772,12 +1771,12 @@ NAPI_METHOD(batch_clear) {
/**
* Worker class for batch write operation.
*/
struct BatchWriteWorker final : public BaseWorker {
struct BatchWriteWorker final : public PriorityWorker {
BatchWriteWorker (napi_env env,
Batch* batch,
napi_value callback,
bool sync)
: BaseWorker(env, batch->database_, callback, "leveldown.batch.write"),
: PriorityWorker(env, batch->database_, callback, "leveldown.batch.write"),
batch_(batch),
sync_(sync) {}

Expand Down
93 changes: 77 additions & 16 deletions test/segfault-test.js
Original file line number Diff line number Diff line change
@@ -1,28 +1,89 @@
const test = require('tape')
const testCommon = require('./common')
const operations = []

// See https://github.com/Level/leveldown/issues/157
test('close() does not segfault if there is a pending write', function (t) {
t.plan(3)
// The db must wait for pending operations to finish before closing. This to
// prevent segfaults and in the case of compactRange() to prevent hanging. See
// https://github.com/Level/leveldown/issues/157 and #32.
// Registers a test that opens a db, writes a base record, starts the given
// pending operation(s) via `fn`, then immediately calls close(). The binding
// must defer closing until every operation callback has fired; we verify
// that by counting completions before close() reports back.
//
// name           - label used in the test title
// expectedCount  - how many times the operation callback must fire before close
// fn             - function (db, next) that starts the pending operation(s)
function testPending (name, expectedCount, fn) {
  // Collected so the final combined test can replay all scenarios at once.
  operations.push(fn)

  test(`close() waits for pending ${name}`, function (t) {
    const db = testCommon.factory()
    let count = 0

    db.open(function (err) {
      t.ifError(err, 'no error from open()')

      db.put('key', 'value', function (err) {
        t.ifError(err, 'no error from put()')

        // Start the operation(s) under test, but do not wait for completion.
        fn(db, function (err) {
          count++
          t.ifError(err, 'no error from operation')
        })

        // Close while the operation is still in flight. If close() did not
        // defer, this would segfault (or hang, for compactRange()).
        db.close(function (err) {
          t.ifError(err, 'no error from close()')
          t.is(count, expectedCount, 'operation(s) finished before close')
          t.end()
        })
      })
    })
  })
}

// Single-operation scenarios: each starts one async operation and expects
// exactly one completion before close() finishes.
testPending('get()', 1, (db, next) => {
  db.get('key', next)
})

testPending('put()', 1, (db, next) => {
  db.put('key2', 'value', next)
})

testPending('put() with { sync }', 1, (db, next) => {
  // Writing with { sync: true } is slower, which widens the window in which
  // a premature close() could have crashed the process.
  db.put('key2', 'value', { sync: true }, next)
})

testPending('del()', 1, (db, next) => {
  db.del('key', next)
})

testPending('del() with { sync }', 1, (db, next) => {
  db.del('key', { sync: true }, next)
})

testPending('batch([])', 1, (db, next) => {
  db.batch([{ type: 'del', key: 'key' }], next)
})

testPending('batch([]) with { sync }', 1, (db, next) => {
  db.batch([{ type: 'del', key: 'key' }], { sync: true }, next)
})

testPending('batch()', 1, (db, next) => {
  db.batch().del('key').write(next)
})

testPending('batch() with { sync }', 1, (db, next) => {
  db.batch().del('key').write({ sync: true }, next)
})

testPending('approximateSize()', 1, (db, next) => {
  db.approximateSize('a', 'z', next)
})

testPending('compactRange()', 1, (db, next) => {
  db.compactRange('a', 'z', next)
})

// Test multiple pending operations at once, replaying all scenarios above on
// a single db. Note that `operations.length` is evaluated BEFORE testPending()
// pushes this combined fn onto `operations`, so it equals the number of
// single-operation scenarios; `slice(0, -1)` below then excludes this
// combined fn itself (the last element) to avoid infinite recursion.
testPending('operations', operations.length, function (db, next) {
  for (const fn of operations.slice(0, -1)) {
    fn(db, next)
  }
})

// See https://github.com/Level/leveldown/issues/134
Expand Down