Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: reset pxe indexes #10093

Merged
merged 3 commits into from
Nov 21, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions yarn-project/pxe/src/database/kv_pxe_database.ts
Original file line number Diff line number Diff line change
Expand Up @@ -702,4 +702,15 @@ export class KVPxeDatabase implements PxeDatabase {
/**
 * Looks up the stored index for each app tagging secret in the given map,
 * defaulting to 0 for secrets that have never been indexed.
 * All reads happen inside a single db transaction for a consistent snapshot.
 */
#getTaggingSecretsIndexes(appTaggingSecrets: Fr[], storageMap: AztecMap<string, number>): Promise<number[]> {
  return this.db.transaction(() => {
    const indexes: number[] = [];
    for (const secret of appTaggingSecrets) {
      indexes.push(storageMap.get(secret.toString()) ?? 0);
    }
    return indexes;
  });
}

/**
 * Deletes every stored tagging-secret index, for both recipients and senders,
 * forcing a full resync of tagged note logs the next time they are pulled.
 * Both maps are cleared atomically in one transaction.
 */
async resetNoteSyncData(): Promise<void> {
  await this.db.transaction(() => {
    for (const recipient of this.#taggingSecretIndexesForRecipients.keys()) {
      // delete() returns a promise inside the transaction; `void` marks it fire-and-forget
      void this.#taggingSecretIndexesForRecipients.delete(recipient);
    }
    for (const sender of this.#taggingSecretIndexesForSenders.keys()) {
      void this.#taggingSecretIndexesForSenders.delete(sender);
    }
  });
}
}
8 changes: 8 additions & 0 deletions yarn-project/pxe/src/database/pxe_database.ts
Original file line number Diff line number Diff line change
Expand Up @@ -226,4 +226,12 @@ export interface PxeDatabase extends ContractArtifactDatabase, ContractInstanceD
* @param blockNumber - All nullifiers strictly after this block are removed.
*/
unnullifyNotesAfter(blockNumber: number): Promise<void>;

/**
 * Resets to 0 the indexes used to sync notes, for every sender and
 * recipient. This forces a full resync the next time tagged logs are
 * pulled from the node (as a recipient), and reuses the same indexes
 * when tagging new notes (as a sender).
 */
resetNoteSyncData(): Promise<void>;
}
4 changes: 4 additions & 0 deletions yarn-project/pxe/src/pxe_service/pxe_service.ts
Original file line number Diff line number Diff line change
Expand Up @@ -977,4 +977,8 @@ export class PXEService implements PXE {

return decodedEvents;
}

/** Wipes all note tagging indexes in the database, forcing a full note resync. */
async resetNoteSyncData() {
  await this.db.resetNoteSyncData();
}
}
71 changes: 64 additions & 7 deletions yarn-project/pxe/src/simulator_oracle/simulator_oracle.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ import { type PxeDatabase } from '../database/index.js';
import { KVPxeDatabase } from '../database/kv_pxe_database.js';
import { type OutgoingNoteDao } from '../database/outgoing_note_dao.js';
import { ContractDataOracle } from '../index.js';
import { type SimulatorOracle } from './index.js';
import { SimulatorOracle } from './index.js';

const TXS_PER_BLOCK = 4;
const NUM_NOTE_HASHES_PER_BLOCK = TXS_PER_BLOCK * MAX_NOTE_HASHES_PER_TX;
Expand Down Expand Up @@ -130,8 +130,7 @@ describe('Simulator oracle', () => {
contractDataOracle = new ContractDataOracle(database);
jest.spyOn(contractDataOracle, 'getDebugContractName').mockImplementation(() => Promise.resolve('TestContract'));
keyStore = new KeyStore(db);
const simulatorOracleModule = await import('../simulator_oracle/index.js');
simulatorOracle = new simulatorOracleModule.SimulatorOracle(contractDataOracle, database, keyStore, aztecNode);
simulatorOracle = new SimulatorOracle(contractDataOracle, database, keyStore, aztecNode);
// Set up contract address
contractAddress = AztecAddress.random();
// Set up recipient account
Expand Down Expand Up @@ -291,7 +290,7 @@ describe('Simulator oracle', () => {
expect(indexes).toEqual([6, 6, 6, 6, 6, 7, 7, 7, 7, 7]);

// We should have called the node 17 times:
// 5 times with no results (sender offset) + 2 times with logs (slide the window) + 10 times with no results (window size)
// 5 times with no results (sender offset) + 2 times with logs (sliding the window) + 10 times with no results (window size)
expect(aztecNode.getLogsByTags.mock.calls.length).toBe(5 + 2 + SENDER_OFFSET_WINDOW_SIZE);
});

Expand All @@ -306,19 +305,29 @@ describe('Simulator oracle', () => {
return poseidon2Hash([firstSenderSharedSecret.x, firstSenderSharedSecret.y, contractAddress]);
});

// Increase our indexes to 2
await database.setTaggingSecretsIndexesAsRecipient(secrets.map(secret => new IndexedTaggingSecret(secret, 2)));

const syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);

// Even if our index as recipient is higher than what the recipient sent, we should be able to find the logs
// Even if our index as recipient is higher than what the sender sent, we should be able to find the logs
// since the window starts at Math.max(0, 2 - window_size) = 0
expect(syncedLogs.get(recipient.address.toString())).toHaveLength(NUM_SENDERS + 1 + NUM_SENDERS / 2);

// First sender should have 2 logs, but keep index 2 since they were built using the same tag
// Next 4 senders should also have index 2 = offset + 1
// Last 5 senders should have index 3 = offset + 2
const indexes = await database.getTaggingSecretsIndexesAsRecipient(secrets);

expect(indexes).toHaveLength(NUM_SENDERS);
expect(indexes).toEqual([2, 2, 2, 2, 2, 3, 3, 3, 3, 3]);

// We should have called the node 13 times:
// 1 time without logs + 2 times with logs (sliding the window) + 10 times with no results (window size)
expect(aztecNode.getLogsByTags.mock.calls.length).toBe(3 + SENDER_OFFSET_WINDOW_SIZE);
});

it("should sync not tagged logs for which indexes are not updated if they're outside the window", async () => {
it("should not sync tagged logs for which indexes are not updated if they're outside the window", async () => {
const senderOffset = 0;
generateMockLogs(senderOffset);

Expand All @@ -335,14 +344,62 @@ describe('Simulator oracle', () => {

const syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);

// Only half of the logs should be synced since we start from index 1 = offset + 1, the other half should be skipped
// Only half of the logs should be synced since we start from index 1 = (11 - window_size), the other half should be skipped
expect(syncedLogs.get(recipient.address.toString())).toHaveLength(NUM_SENDERS / 2);

// Indexes should remain where we set them (window_size + 1)
const indexes = await database.getTaggingSecretsIndexesAsRecipient(secrets);

expect(indexes).toHaveLength(NUM_SENDERS);
expect(indexes).toEqual([11, 11, 11, 11, 11, 11, 11, 11, 11, 11]);

// We should have called the node SENDER_OFFSET_WINDOW_SIZE + 1 (with logs) + SENDER_OFFSET_WINDOW_SIZE:
// Once for index 1 (NUM_SENDERS/2 logs) + 2 times the sliding window (no logs each time)
expect(aztecNode.getLogsByTags.mock.calls.length).toBe(1 + 2 * SENDER_OFFSET_WINDOW_SIZE);
});

it('should sync tagged logs from scratch after a DB wipe', async () => {
  const senderOffset = 0;
  generateMockLogs(senderOffset);

  // Recompute the secrets (as recipient) to update indexes
  const ivsk = await keyStore.getMasterIncomingViewingSecretKey(recipient.address);
  const secrets = senders.map(sender => {
    const firstSenderSharedSecret = computeTaggingSecret(recipient, ivsk, sender.completeAddress.address);
    return poseidon2Hash([firstSenderSharedSecret.x, firstSenderSharedSecret.y, contractAddress]);
  });

  // Push all recipient indexes past the window so the first sync finds nothing
  await database.setTaggingSecretsIndexesAsRecipient(
    secrets.map(secret => new IndexedTaggingSecret(secret, SENDER_OFFSET_WINDOW_SIZE + 2)),
  );

  let syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);

  // No logs should be synced since we start from index 2 = 12 - window_size
  expect(syncedLogs.get(recipient.address.toString())).toHaveLength(0);
  // We should have called the node 21 times (window size + current_index + window size)
  expect(aztecNode.getLogsByTags.mock.calls.length).toBe(2 * SENDER_OFFSET_WINDOW_SIZE + 1);

  aztecNode.getLogsByTags.mockClear();

  // Wipe the database
  await database.resetNoteSyncData();

  syncedLogs = await simulatorOracle.syncTaggedLogs(contractAddress, 3);

  // First sender should have 2 logs, but keep index 1 since they were built using the same tag
  // Next 4 senders should also have index 1 = offset + 1
  // Last 5 senders should have index 2 = offset + 2
  const indexes = await database.getTaggingSecretsIndexesAsRecipient(secrets);

  expect(indexes).toHaveLength(NUM_SENDERS);
  expect(indexes).toEqual([1, 1, 1, 1, 1, 2, 2, 2, 2, 2]);

  // We should have called the node 12 times:
  // 2 times with logs (sliding the window) + 10 times with no results (window size)
  expect(aztecNode.getLogsByTags.mock.calls.length).toBe(2 + SENDER_OFFSET_WINDOW_SIZE);
});

it('should not sync tagged logs with a blockNumber > maxBlockNumber', async () => {
const senderOffset = 0;
generateMockLogs(senderOffset);
Expand Down
2 changes: 2 additions & 0 deletions yarn-project/pxe/src/synchronizer/synchronizer.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ describe('Synchronizer', () => {
it('removes notes from db on a reorg', async () => {
const removeNotesAfter = jest.spyOn(database, 'removeNotesAfter').mockImplementation(() => Promise.resolve());
const unnullifyNotesAfter = jest.spyOn(database, 'unnullifyNotesAfter').mockImplementation(() => Promise.resolve());
const resetNoteSyncData = jest.spyOn(database, 'resetNoteSyncData').mockImplementation(() => Promise.resolve());
aztecNode.getBlockHeader.mockImplementation(blockNumber =>
Promise.resolve(L2Block.random(blockNumber as number).header),
);
Expand All @@ -53,5 +54,6 @@ describe('Synchronizer', () => {

expect(removeNotesAfter).toHaveBeenCalledWith(3);
expect(unnullifyNotesAfter).toHaveBeenCalledWith(3);
expect(resetNoteSyncData).toHaveBeenCalled();
});
});
2 changes: 2 additions & 0 deletions yarn-project/pxe/src/synchronizer/synchronizer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,8 @@ export class Synchronizer implements L2BlockStreamEventHandler {
// We first unnullify and then remove so that unnullified notes that were created after the block number end up deleted.
await this.db.unnullifyNotesAfter(event.blockNumber);
await this.db.removeNotesAfter(event.blockNumber);
// Remove all note tagging indexes to force a full resync
await this.db.resetNoteSyncData();
// Update the header to the last block.
await this.db.setHeader(await this.node.getBlockHeader(event.blockNumber));
break;
Expand Down