Skip to content

Commit

Permalink
ADD test
Browse files Browse the repository at this point in the history
  • Loading branch information
pubkey committed Apr 5, 2023
1 parent e34db2b commit 831410b
Show file tree
Hide file tree
Showing 6 changed files with 195 additions and 80 deletions.
86 changes: 86 additions & 0 deletions orga/performance-trackings.md
Original file line number Diff line number Diff line change
Expand Up @@ -1211,3 +1211,89 @@ A2: Rewrite to own sort function

BEFORE:
30.1



## 4 April 2023 - improve OPFS storage performance

#### (time-to-first-insert)

BEFORE:
performanceResult: 54.26


LOG LOG: 'performanceResult: 52.08'


AFTER:
'performanceResult: 13.66'


#### find-by-id

BEFORE:

'performanceResult: 34.82'
'performanceResult: 35.26'

AFTER:

'performanceResult: 20.15'
'performanceResult: 20.65'

AFTER2:
19.03
18.9

AFTER3:
performanceResult: 12.59
performanceResult: 12.78

AFTER4:
performanceResult: 12.87

### insert documents

BEFORE:
'performanceResult: 34.52'
LOG LOG: 'performanceResult: 34.1'


bulkWrite(20000) 1 - 290.4000000022352
file-system-access.worker.js:9220 bulkWrite(20000) 2 - 352.5
file-system-access.worker.js:9230 bulkWrite(20000) 3 - 352.6000000014901
context.js:265 .
file-system-access.worker.js:9236 bulkWrite(20000) 4 - 461.6000000014901
file-system-access.worker.js:9252 bulkWrite(20000) 5 - 1091.6000000014901
context.js:265 .
file-system-access.worker.js:9257 bulkWrite(20000) 6 - 1428.9000000022352
context.js:265


### Query

BEFORE:
'performanceResult: 11.38'
'performanceResult: 11.17'

AFTER:
performanceResult: 10.91
'performanceResult: 10.38'


### init storage

BEFORE:
performanceResult: 16.97

AFTER:
performanceResult: 15.95
performanceResult: 15.77

AFTER2:
performanceResult: 14.7
performanceResult: 13.83

AFTER3:
performanceResult: 12.86
performanceResult: 12.33
3 changes: 2 additions & 1 deletion src/custom-index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,8 @@ export function getPrimaryKeyFromIndexableString(
primaryKeyLength: number
): string {
const paddedPrimaryKey = indexableString.slice(primaryKeyLength * -1);
const primaryKey = paddedPrimaryKey.trimEnd();
// we can safely trim here because the primary key is not allowed to start or end with a space char.
const primaryKey = paddedPrimaryKey.trim();
return primaryKey;
}

Expand Down
9 changes: 6 additions & 3 deletions src/rx-storage-helper.ts
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,7 @@ export function throwIfIsStorageWriteError<RxDocType>(
* and which events must be emitted and which documents cause a conflict
* and must not be written.
* Used as helper inside of some RxStorage implementations.
* @hotPath The performance of this function is critical
*/
export function categorizeBulkWriteRows<RxDocType>(
storageInstance: RxStorageInstance<any, any, any>,
Expand Down Expand Up @@ -208,7 +209,9 @@ export function categorizeBulkWriteRows<RxDocType>(
const docsByIdIsMap = typeof docsInDb.get === 'function';
let newestRow: BulkWriteRowProcessed<RxDocType> | undefined;

bulkWriteRows.forEach(writeRow => {
const rowAmount = bulkWriteRows.length;
for (let i = 0; i < rowAmount; i++) {
const writeRow = bulkWriteRows[i];
const id = writeRow.document[primaryPath];
const documentInDb = docsByIdIsMap ? (docsInDb as any).get(id) : (docsInDb as any)[id];
let attachmentError: RxStorageWriteErrorAttachment<RxDocType> | undefined;
Expand Down Expand Up @@ -292,7 +295,7 @@ export function categorizeBulkWriteRows<RxDocType>(
documentInDb
};
errors[id as any] = err;
return;
break;
}

// handle attachments data
Expand Down Expand Up @@ -411,7 +414,7 @@ export function categorizeBulkWriteRows<RxDocType>(
endTime: now()
});
}
});
}

return {
bulkInsertDocs,
Expand Down
144 changes: 71 additions & 73 deletions test/unit/data-migration.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -858,86 +858,84 @@ config.parallel('data-migration.test.ts', () => {
describe('major versions', () => {
});
describe('issues', () => {
describe('#212 migration runs into infinity-loop', () => {
it('reproduce and fix', async () => {
const dbName = randomCouchString(10);
const schema0 = {
title: 'hero schema',
description: 'describes a simple hero',
version: 0,
primaryKey: 'name',
type: 'object',
properties: {
name: {
type: 'string',
maxLength: 100
},
color: {
type: 'string'
}
it('#212 migration runs into infinity-loop', async () => {
const dbName = randomCouchString(10);
const schema0 = {
title: 'hero schema',
description: 'describes a simple hero',
version: 0,
primaryKey: 'name',
type: 'object',
properties: {
name: {
type: 'string',
maxLength: 100
},
required: ['color']
};
const schema1 = {
title: 'hero schema',
description: 'describes a simple hero',
version: 1,
primaryKey: 'name',
type: 'object',
properties: {
name: {
type: 'string',
maxLength: 100
},
color: {
type: 'string'
},
level: {
type: 'string'
}
color: {
type: 'string'
}
},
required: ['color']
};
const schema1 = {
title: 'hero schema',
description: 'describes a simple hero',
version: 1,
primaryKey: 'name',
type: 'object',
properties: {
name: {
type: 'string',
maxLength: 100
},
required: ['color']
};
const db = await createRxDatabase({
name: dbName,
storage: config.storage.getStorage(),
});
const cols = await db.addCollections({
heroes: {
schema: schema0
color: {
type: 'string'
},
level: {
type: 'string'
}
});
const col = cols.heroes;
await col.insert({
name: 'Niven',
color: 'black'
});
await db.destroy();
},
required: ['color']
};
const db = await createRxDatabase({
name: dbName,
storage: config.storage.getStorage(),
});
const cols = await db.addCollections({
heroes: {
schema: schema0
}
});
const col = cols.heroes;
await col.insert({
name: 'Niven',
color: 'black'
});
await db.destroy();

const db2 = await createRxDatabase({
name: dbName,
storage: config.storage.getStorage(),
});
const cols2 = await db2.addCollections({
heroes: {
schema: schema1,
migrationStrategies: {
1: (oldDoc: any) => {
oldDoc.level = 'ss';
return oldDoc;
}
const db2 = await createRxDatabase({
name: dbName,
storage: config.storage.getStorage(),
});
const cols2 = await db2.addCollections({
heroes: {
schema: schema1,
migrationStrategies: {
1: (oldDoc: any) => {
oldDoc.level = 'ss';
return oldDoc;
}
}
});
const col2 = cols2.heroes;

const docs = await col2.find().exec();
assert.strictEqual(docs.length, 1);
assert.strictEqual(docs[0].level, 'ss');
assert.strictEqual(docs[0].name, 'Niven');
assert.strictEqual(docs[0].color, 'black');
db2.destroy();
}
});
const col2 = cols2.heroes;

const docs = await col2.find().exec();
assert.strictEqual(docs.length, 1);
assert.strictEqual(docs[0].level, 'ss');
assert.strictEqual(docs[0].name, 'Niven');
assert.strictEqual(docs[0].color, 'black');
db2.destroy();
});
it('#3460 migrate attachments', async () => {
if (!config.storage.hasAttachments) {
Expand Down
6 changes: 4 additions & 2 deletions test/unit/key-compression.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -53,14 +53,16 @@ config.parallel('key-compression.test.js', () => {
c.database.destroy();
});
});
describe('integration into pouchDB', () => {
describe('integration into the RxStorage', () => {
it('should have saved a compressed document', async () => {
const c = await getCollection();
const docData = schemaObjects.simpleHuman();
await c.insert(docData);


const internalInstance: RxStorageInstance<schemaObjects.SimpleHumanDocumentType, any, any> = await (c.storageInstance.originalStorageInstance as any)
const internalInstance: RxStorageInstance<
schemaObjects.SimpleHumanDocumentType, any, any
> = await (c.storageInstance.originalStorageInstance as any)
.originalStorageInstance;

const storageDocs = await internalInstance.findDocumentsById([docData.passportId], true);
Expand Down
27 changes: 26 additions & 1 deletion test/unit/rx-storage-implementations.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2829,7 +2829,7 @@ config.parallel('rx-storage-implementations.test.ts (implementation: ' + config.
});

const id = 'foobar';
const nonDeletedId = 'foobar2';
const nonDeletedId = 'nonDeletedId';

/**
* Insert one that does not get deleted
Expand Down Expand Up @@ -3081,6 +3081,11 @@ config.parallel('rx-storage-implementations.test.ts (implementation: ' + config.
multiInstance: true,
devMode: true
});

// run a fetch on both instances to ensure the setup has finished
await a.findDocumentsById(['foobar'], true);
await b.findDocumentsById(['foobar'], true);

return {
a,
b
Expand All @@ -3090,6 +3095,26 @@ config.parallel('rx-storage-implementations.test.ts (implementation: ' + config.
await instances.a.close();
await instances.b.close();
}
it('should update the state on the other instance', async () => {
const instances = await getMultiInstanceRxStorageInstance();

await instances.a.bulkWrite([{
document: getWriteData({ key: 'a' })
}], testContext);
await instances.b.bulkWrite([{
document: getWriteData({ key: 'b' })
}], testContext);
const allIds = ['a', 'b'];

const resultA = await instances.a.findDocumentsById(allIds, true);
assert.deepStrictEqual(Object.keys(resultA), allIds);

const resultB = await instances.b.findDocumentsById(allIds, true);
assert.deepStrictEqual(Object.keys(resultB), allIds);

await instances.a.close();
await instances.b.close();
});
it('should be able to write and read documents', async () => {
const instances = await getMultiInstanceRxStorageInstance();

Expand Down

0 comments on commit 831410b

Please sign in to comment.