From dc56c5c30fa9e6c1802ab6abfaa1cf0c5cf6ef7f Mon Sep 17 00:00:00 2001 From: Fatih Turker Date: Sun, 21 May 2023 02:25:41 +0300 Subject: [PATCH 1/3] Rate limit issue fix --- lib/agi/main.agi.ts | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/lib/agi/main.agi.ts b/lib/agi/main.agi.ts index c428390..c7c2997 100644 --- a/lib/agi/main.agi.ts +++ b/lib/agi/main.agi.ts @@ -85,6 +85,13 @@ export class MainAGI { this.loggerUtil = new LoggerUtil(this.consolidationId, this.logPath); } + public isOneMinuteExceeded(previousDate: Date, currentDate: Date): boolean { + const timeDifference = currentDate.getTime() - previousDate.getTime(); + const oneMinuteInMillis = 60 * 1000; // One minute in milliseconds + + return timeDifference > oneMinuteInMillis; + } + /** * Starts an AGI (Artificial General Intelligence) action by initializing necessary components, * clearing previous folders, and generating prompts for user input based on previous responses. 
@@ -98,10 +105,14 @@ export class MainAGI { await this.fileUtil.createFolder(this.taskDir); - this.loggerUtil.log('Action started at ' + new Date().toISOString()); + let startDate = new Date(); + + this.loggerUtil.log('Action started at ' + startDate.toISOString()); this.openAIProvider.initialize(this.loggerUtil); + const TPM = 60000; + const memoryUtil = new MemoryUtil(this.fileUtil, this.ltmPath); await memoryUtil.resetLTM(); @@ -177,6 +188,8 @@ export class MainAGI { let iteration = 0; + let currentToken = 0; + while (!parsed.completed && content.maxAttempt >= attemptCount) { const stepName = 'Step ' + attemptCount.toString() + ': ' + parsed.step; @@ -226,9 +239,21 @@ export class MainAGI { } } - // 20 seconds delay between each request to avoid exceeding rate limit - this.loggerUtil.log('Waiting 20 seconds to do not exceed rate limit.'); - await this.delay(20000); + // Rate Limit Fix + const now = new Date(); + + if (this.isOneMinuteExceeded(startDate, now)) { + currentToken = 0; + } else { + const delayInterval = now.getTime() - startDate.getTime(); + const delaySeconds = delayInterval / 1000; + + this.loggerUtil.log( + `Waiting ${delaySeconds} seconds to avoid exceeding the rate limit.` + ); + await this.delay(delayInterval); + startDate = new Date(); + } try { res = await this.openAIProvider.generateCompletion( @@ -236,6 +261,8 @@ export class MainAGI { max_tokens ); + currentToken += this.openAIProvider.countTokens(nextPrompt); + this.loggerUtil.log('Response is captured. 
Processing..'); parsed = await this.processGpt4ApiResponse( From 472b50f5696ba3c340868d26a57413ff24911683 Mon Sep 17 00:00:00 2001 From: Fatih Turker Date: Sun, 21 May 2023 02:27:31 +0300 Subject: [PATCH 2/3] Add TPM check --- lib/agi/main.agi.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/agi/main.agi.ts b/lib/agi/main.agi.ts index c7c2997..284d183 100644 --- a/lib/agi/main.agi.ts +++ b/lib/agi/main.agi.ts @@ -244,7 +244,7 @@ export class MainAGI { if (this.isOneMinuteExceeded(startDate, now)) { currentToken = 0; - } else { + } else if (TPM <= currentToken) { const delayInterval = now.getTime() - startDate.getTime(); const delaySeconds = delayInterval / 1000; From ccf05c061a891b91e4072170596ff44f60329462 Mon Sep 17 00:00:00 2001 From: Fatih Turker Date: Sun, 21 May 2023 02:31:14 +0300 Subject: [PATCH 3/3] Use max token in TPM check --- lib/agi/main.agi.ts | 2 +- lib/provider/open-ai.provider.ts | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/agi/main.agi.ts b/lib/agi/main.agi.ts index 284d183..20dccc6 100644 --- a/lib/agi/main.agi.ts +++ b/lib/agi/main.agi.ts @@ -261,7 +261,7 @@ export class MainAGI { max_tokens ); - currentToken += this.openAIProvider.countTokens(nextPrompt); + currentToken += this.openAIProvider.getDefaultMaxToken(); this.loggerUtil.log('Response is captured. Processing..'); diff --git a/lib/provider/open-ai.provider.ts b/lib/provider/open-ai.provider.ts index 7911a1d..1ddb756 100644 --- a/lib/provider/open-ai.provider.ts +++ b/lib/provider/open-ai.provider.ts @@ -76,6 +76,10 @@ export class OpenAIAzureProvider { return this.MAX_TOKEN_COUNT - this.countTokens(prompt) - 10; }; + getDefaultMaxToken = () => { + return this.MAX_TOKEN_COUNT; + }; + /** * Sends a completion request to OpenAI API. *