diff --git a/orga/performance-trackings.md b/orga/performance-trackings.md
index acb4384cce0..a6e1f72f2ba 100644
--- a/orga/performance-trackings.md
+++ b/orga/performance-trackings.md
@@ -1211,3 +1211,89 @@ A2: Rewrite to own sort function
 BEFORE: 30.1
+
+
+
+## 4 April 2023 - improve OPFS storage performance
+
+### time-to-first-insert
+
+BEFORE:
+performanceResult: 54.26
+performanceResult: 52.08
+
+AFTER:
+performanceResult: 13.66
+
+### find-by-id
+
+BEFORE:
+performanceResult: 34.82
+performanceResult: 35.26
+
+AFTER:
+performanceResult: 20.15
+performanceResult: 20.65
+
+AFTER2:
+performanceResult: 19.03
+performanceResult: 18.9
+
+AFTER3:
+performanceResult: 12.59
+performanceResult: 12.78
+
+AFTER4:
+performanceResult: 12.87
+
+### insert documents
+
+BEFORE:
+performanceResult: 34.52
+performanceResult: 34.1
+
+bulkWrite(20000) 1 - 290.4
+bulkWrite(20000) 2 - 352.5
+bulkWrite(20000) 3 - 352.6
+bulkWrite(20000) 4 - 461.6
+bulkWrite(20000) 5 - 1091.6
+bulkWrite(20000) 6 - 1428.9
+
+### Query
+
+BEFORE:
+performanceResult: 11.38
+performanceResult: 11.17
+
+AFTER:
+performanceResult: 10.91
+performanceResult: 10.38
+
+### init storage
+
+BEFORE:
+performanceResult: 16.97
+
+AFTER:
+performanceResult: 15.95
+performanceResult: 15.77
+
+AFTER2:
+performanceResult: 14.7
+performanceResult: 13.83
+
+AFTER3:
+performanceResult: 12.86
+performanceResult: 12.33
diff --git a/src/custom-index.ts b/src/custom-index.ts
index a45e8297d8a..8937494ed7f 100644
--- a/src/custom-index.ts
+++ b/src/custom-index.ts
@@ -177,7 +177,8 @@ export function getPrimaryKeyFromIndexableString(
     primaryKeyLength: number
 ): string {
     const paddedPrimaryKey = indexableString.slice(primaryKeyLength * -1);
-    const primaryKey = paddedPrimaryKey.trimEnd();
+    // we can safely trim here because the primary key is not allowed to start or end with a space character.
+    const primaryKey = paddedPrimaryKey.trim();
     return primaryKey;
 }
diff --git a/src/rx-storage-helper.ts b/src/rx-storage-helper.ts
index b946def2afe..929de29990a 100644
--- a/src/rx-storage-helper.ts
+++ b/src/rx-storage-helper.ts
@@ -151,6 +151,7 @@ export function throwIfIsStorageWriteError(
  * and which events must be emitted and which documents cause a conflict
  * and must not be written.
  * Used as helper inside of some RxStorage implementations.
+ * @hotPath The performance of this function is critical.
  */
 export function categorizeBulkWriteRows(
     storageInstance: RxStorageInstance,
@@ -208,7 +209,9 @@ export function categorizeBulkWriteRows(
     const docsByIdIsMap = typeof docsInDb.get === 'function';
     let newestRow: BulkWriteRowProcessed | undefined;
 
-    bulkWriteRows.forEach(writeRow => {
+    const rowAmount = bulkWriteRows.length;
+    for (let i = 0; i < rowAmount; i++) {
+        const writeRow = bulkWriteRows[i];
         const id = writeRow.document[primaryPath];
         const documentInDb = docsByIdIsMap ?
            (docsInDb as any).get(id) : (docsInDb as any)[id];
         let attachmentError: RxStorageWriteErrorAttachment | undefined;
@@ -292,7 +295,7 @@ export function categorizeBulkWriteRows(
                 documentInDb
             };
             errors[id as any] = err;
-            return;
+            continue;
         }
 
         // handle attachments data
@@ -411,7 +414,7 @@ export function categorizeBulkWriteRows(
                 endTime: now()
             });
         }
-    });
+    }
 
     return {
         bulkInsertDocs,
diff --git a/test/unit/data-migration.test.ts b/test/unit/data-migration.test.ts
index 646d25b612d..bfa6d8b1d6c 100644
--- a/test/unit/data-migration.test.ts
+++ b/test/unit/data-migration.test.ts
@@ -858,86 +858,84 @@ config.parallel('data-migration.test.ts', () => {
     describe('major versions', () => { });
     describe('issues', () => {
-        describe('#212 migration runs into infinity-loop', () => {
-            it('reproduce and fix', async () => {
-                const dbName = randomCouchString(10);
-                const schema0 = {
-                    title: 'hero schema',
-                    description: 'describes a simple hero',
-                    version: 0,
-                    primaryKey: 'name',
-                    type: 'object',
-                    properties: {
-                        name: {
-                            type: 'string',
-                            maxLength: 100
-                        },
-                        color: {
-                            type: 'string'
-                        }
+        it('#212 migration runs into infinity-loop', async () => {
+            const dbName = randomCouchString(10);
+            const schema0 = {
+                title: 'hero schema',
+                description: 'describes a simple hero',
+                version: 0,
+                primaryKey: 'name',
+                type: 'object',
+                properties: {
+                    name: {
+                        type: 'string',
+                        maxLength: 100
                     },
-                    required: ['color']
-                };
-                const schema1 = {
-                    title: 'hero schema',
-                    description: 'describes a simple hero',
-                    version: 1,
-                    primaryKey: 'name',
-                    type: 'object',
-                    properties: {
-                        name: {
-                            type: 'string',
-                            maxLength: 100
-                        },
-                        color: {
-                            type: 'string'
-                        },
-                        level: {
-                            type: 'string'
-                        }
+                    color: {
+                        type: 'string'
+                    }
+                },
+                required: ['color']
+            };
+            const schema1 = {
+                title: 'hero schema',
+                description: 'describes a simple hero',
+                version: 1,
+                primaryKey: 'name',
+                type: 'object',
+                properties: {
+                    name: {
+                        type: 'string',
+                        maxLength: 100
                     },
-                    required: ['color']
-                };
-                const db = await createRxDatabase({
-                    name: dbName,
-                    storage: config.storage.getStorage(),
-                });
-                const cols = await db.addCollections({
-                    heroes: {
-                        schema: schema0
+                    color: {
+                        type: 'string'
+                    },
+                    level: {
+                        type: 'string'
                     }
-                });
-                const col = cols.heroes;
-                await col.insert({
-                    name: 'Niven',
-                    color: 'black'
-                });
-                await db.destroy();
+                },
+                required: ['color']
+            };
+            const db = await createRxDatabase({
+                name: dbName,
+                storage: config.storage.getStorage(),
+            });
+            const cols = await db.addCollections({
+                heroes: {
+                    schema: schema0
+                }
+            });
+            const col = cols.heroes;
+            await col.insert({
+                name: 'Niven',
+                color: 'black'
+            });
+            await db.destroy();
 
-                const db2 = await createRxDatabase({
-                    name: dbName,
-                    storage: config.storage.getStorage(),
-                });
-                const cols2 = await db2.addCollections({
-                    heroes: {
-                        schema: schema1,
-                        migrationStrategies: {
-                            1: (oldDoc: any) => {
-                                oldDoc.level = 'ss';
-                                return oldDoc;
-                            }
+            const db2 = await createRxDatabase({
+                name: dbName,
+                storage: config.storage.getStorage(),
+            });
+            const cols2 = await db2.addCollections({
+                heroes: {
+                    schema: schema1,
+                    migrationStrategies: {
+                        1: (oldDoc: any) => {
+                            oldDoc.level = 'ss';
+                            return oldDoc;
                         }
                     }
-                });
-                const col2 = cols2.heroes;
-
-                const docs = await col2.find().exec();
-                assert.strictEqual(docs.length, 1);
-                assert.strictEqual(docs[0].level, 'ss');
-                assert.strictEqual(docs[0].name, 'Niven');
-                assert.strictEqual(docs[0].color, 'black');
-                db2.destroy();
+                }
             });
+            const col2 = cols2.heroes;
+
+            const docs = await col2.find().exec();
+            assert.strictEqual(docs.length, 1);
+            assert.strictEqual(docs[0].level, 'ss');
+            assert.strictEqual(docs[0].name, 'Niven');
+            assert.strictEqual(docs[0].color, 'black');
+            db2.destroy();
         });
         it('#3460 migrate attachments', async () => {
             if (!config.storage.hasAttachments) {
diff --git a/test/unit/key-compression.test.ts b/test/unit/key-compression.test.ts
index 99922cbd675..5696b3dc41f 100644
--- a/test/unit/key-compression.test.ts
+++ b/test/unit/key-compression.test.ts
@@ -53,14 +53,16 @@ config.parallel('key-compression.test.js', () => {
             c.database.destroy();
         });
     });
-    describe('integration into pouchDB', () => {
+    describe('integration into the RxStorage', () => {
         it('should have saved a compressed document', async () => {
             const c = await getCollection();
             const docData = schemaObjects.simpleHuman();
             await c.insert(docData);
 
-            const internalInstance: RxStorageInstance = await (c.storageInstance.originalStorageInstance as any)
+            const internalInstance: RxStorageInstance<
+                schemaObjects.SimpleHumanDocumentType, any, any
+            > = await (c.storageInstance.originalStorageInstance as any)
                 .originalStorageInstance;
 
             const storageDocs = await internalInstance.findDocumentsById([docData.passportId], true);
diff --git a/test/unit/rx-storage-implementations.test.ts b/test/unit/rx-storage-implementations.test.ts
index db8f757f32f..619c923c0c1 100644
--- a/test/unit/rx-storage-implementations.test.ts
+++ b/test/unit/rx-storage-implementations.test.ts
@@ -2829,7 +2829,7 @@ config.parallel('rx-storage-implementations.test.ts (implementation: ' + config.
         });
 
         const id = 'foobar';
-        const nonDeletedId = 'foobar2';
+        const nonDeletedId = 'nonDeletedId';
 
         /**
          * Insert one that does not get deleted
@@ -3081,6 +3081,11 @@ config.parallel('rx-storage-implementations.test.ts (implementation: ' + config.
             multiInstance: true,
             devMode: true
         });
+
+        // run a fetch on both instances to ensure the setup has finished
+        await a.findDocumentsById(['foobar'], true);
+        await b.findDocumentsById(['foobar'], true);
+
         return {
             a,
             b
@@ -3090,6 +3095,26 @@ config.parallel('rx-storage-implementations.test.ts (implementation: ' + config.
         await instances.a.close();
         await instances.b.close();
     }
+    it('should update the state on the other instance', async () => {
+        const instances = await getMultiInstanceRxStorageInstance();
+
+        await instances.a.bulkWrite([{
+            document: getWriteData({ key: 'a' })
+        }], testContext);
+        await instances.b.bulkWrite([{
+            document: getWriteData({ key: 'b' })
+        }], testContext);
+        const allIds = ['a', 'b'];
+
+        const resultA = await instances.a.findDocumentsById(allIds, true);
+        assert.deepStrictEqual(Object.keys(resultA), allIds);
+
+        const resultB = await instances.b.findDocumentsById(allIds, true);
+        assert.deepStrictEqual(Object.keys(resultB), allIds);
+
+        await instances.a.close();
+        await instances.b.close();
+    });
     it('should be able to write and read documents', async () => {
         const instances = await getMultiInstanceRxStorageInstance();
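
Side note on the OPFS numbers in `orga/performance-trackings.md`: the patch does not show the storage-internal change that produced them. For context only, here is a minimal sketch of writing through OPFS sync access handles, the browser API an OPFS-based storage builds on; the file name and payload are made up for illustration and this is not the RxDB OPFS storage code:

```ts
// Minimal OPFS write sketch (illustration only, not the RxDB OPFS storage).
// createSyncAccessHandle() is only available inside a dedicated worker.
async function writeViaOpfs(): Promise<void> {
    const root = await navigator.storage.getDirectory();
    // 'docs.bin' is a hypothetical file name for this example
    const fileHandle = await root.getFileHandle('docs.bin', { create: true });
    const access = await fileHandle.createSyncAccessHandle();
    const payload = new TextEncoder().encode('{"id":"foobar"}');
    access.write(payload, { at: access.getSize() }); // append at the current end
    access.flush(); // persist to disk
    access.close();
}
```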
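On the `custom-index.ts` change: the primary key is right-padded with spaces inside the indexable string, and a key is not allowed to start or end with a space, so a full `trim()` recovers exactly the original key. A small round-trip sketch under those assumptions; the pad width and helper names here are hypothetical, not RxDB's actual helpers:

```ts
// Hypothetical round trip showing why trim() is safe here.
const PRIMARY_KEY_LENGTH = 12; // assumed fixed pad width for this example

function toIndexableSlice(primaryKey: string): string {
    // keys are space-padded to a fixed width so indexable strings align
    return primaryKey.padEnd(PRIMARY_KEY_LENGTH, ' ');
}

function fromIndexableSlice(indexableString: string): string {
    const padded = indexableString.slice(PRIMARY_KEY_LENGTH * -1);
    // safe: a key may not start or end with a space character
    return padded.trim();
}

console.assert(fromIndexableSlice(toIndexableSlice('foobar')) === 'foobar');
```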
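On the `rx-storage-helper.ts` change: in the `@hotPath`, the `forEach` callback is replaced by an indexed `for` loop, and the callback's early `return` becomes `continue` so a conflicting row is skipped without dropping the rows after it. A stripped-down sketch of that loop pattern; the types and the conflict check are simplified placeholders, not the real `categorizeBulkWriteRows` logic:

```ts
// Simplified sketch of the loop rewrite; not the actual RxDB implementation.
type WriteRow = { id: string; revision: string };

function categorize(rows: WriteRow[], docsInDb: Map<string, WriteRow>) {
    const conflicts: string[] = [];
    const nonConflicts: WriteRow[] = [];
    const rowAmount = rows.length; // length lookup hoisted, as in the diff
    for (let i = 0; i < rowAmount; i++) {
        const row = rows[i];
        const inDb = docsInDb.get(row.id);
        if (inDb && inDb.revision !== row.revision) {
            conflicts.push(row.id);
            continue; // skip only this row; `break` would drop all later rows
        }
        nonConflicts.push(row);
    }
    return { conflicts, nonConflicts };
}
```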