diff --git a/lib/agi/main.agi.ts b/lib/agi/main.agi.ts
index c428390..20dccc6 100644
--- a/lib/agi/main.agi.ts
+++ b/lib/agi/main.agi.ts
@@ -85,6 +85,13 @@ export class MainAGI {
     this.loggerUtil = new LoggerUtil(this.consolidationId, this.logPath);
   }
 
+  public isOneMinuteExceeded(previousDate: Date, currentDate: Date): boolean {
+    const timeDifference = currentDate.getTime() - previousDate.getTime();
+    const oneMinuteInMillis = 60 * 1000; // One minute in milliseconds
+
+    return timeDifference > oneMinuteInMillis;
+  }
+
   /**
    * Starts an AGI (Artificial General Intelligence) action by initializing necessary components,
    * clearing previous folders, and generating prompts for user input based on previous responses.
@@ -98,10 +105,14 @@ export class MainAGI {
 
     await this.fileUtil.createFolder(this.taskDir);
 
-    this.loggerUtil.log('Action started at ' + new Date().toISOString());
+    let startDate = new Date();
+
+    this.loggerUtil.log('Action started at ' + startDate.toISOString());
 
     this.openAIProvider.initialize(this.loggerUtil);
 
+    const TPM = 60000;
+
     const memoryUtil = new MemoryUtil(this.fileUtil, this.ltmPath);
     await memoryUtil.resetLTM();
 
@@ -177,6 +188,8 @@ export class MainAGI {
 
     let iteration = 0;
 
+    let currentToken = 0;
+
     while (!parsed.completed && content.maxAttempt >= attemptCount) {
       const stepName = 'Step ' + attemptCount.toString() + ': ' + parsed.step;
 
@@ -226,9 +239,21 @@ export class MainAGI {
         }
       }
 
-      // 20 seconds delay between each request to avoid exceeding rate limit
-      this.loggerUtil.log('Waiting 20 seconds to do not exceed rate limit.');
-      await this.delay(20000);
+      // Rate Limit Fix
+      const now = new Date();
+
+      if (this.isOneMinuteExceeded(startDate, now)) {
+        currentToken = 0;
+      } else if (TPM <= currentToken) {
+        const delayInterval = now.getTime() - startDate.getTime();
+        const delaySeconds = delayInterval / 1000;
+
+        this.loggerUtil.log(
+          `Waiting ${delaySeconds} seconds to avoid exceeding the rate limit.`
+        );
+        await this.delay(delayInterval);
+        startDate = new Date();
+      }
 
       try {
         res = await this.openAIProvider.generateCompletion(
@@ -236,6 +261,8 @@ export class MainAGI {
           max_tokens
         );
 
+        currentToken += this.openAIProvider.getDefaultMaxToken();
+
         this.loggerUtil.log('Response is captured. Processing..');
 
         parsed = await this.processGpt4ApiResponse(
diff --git a/lib/provider/open-ai.provider.ts b/lib/provider/open-ai.provider.ts
index 7911a1d..1ddb756 100644
--- a/lib/provider/open-ai.provider.ts
+++ b/lib/provider/open-ai.provider.ts
@@ -76,6 +76,10 @@ export class OpenAIAzureProvider {
     return this.MAX_TOKEN_COUNT - this.countTokens(prompt) - 10;
   };
 
+  getDefaultMaxToken = () => {
+    return this.MAX_TOKEN_COUNT;
+  };
+
   /**
    * Sends a completion request to OpenAI API.
    *
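
For context, the change in `MainAGI` amounts to a per-minute token budget: each completion request is charged against a TPM quota (here 60000, approximated per request by the provider's `MAX_TOKEN_COUNT` via the new `getDefaultMaxToken()`), and once the quota is used up before the minute elapses the loop sleeps before the next request. Below is a minimal standalone sketch of that idea, not part of the patch; `TokenRateLimiter` and `reserve()` are hypothetical names, and this sketch waits for the remainder of the minute rather than the elapsed time.

// Hypothetical sketch of a per-minute token budget (TypeScript); not part of the patch.
class TokenRateLimiter {
  private windowStart = Date.now();
  private tokensUsed = 0;

  constructor(private readonly tokensPerMinute: number) {}

  // Block until the next request fits inside the current one-minute window.
  async reserve(estimatedTokens: number): Promise<void> {
    const elapsed = Date.now() - this.windowStart;

    if (elapsed > 60_000) {
      // The minute has passed: open a fresh window.
      this.windowStart = Date.now();
      this.tokensUsed = 0;
    } else if (this.tokensUsed + estimatedTokens > this.tokensPerMinute) {
      // Budget exhausted: sleep for the remainder of the window, then reset.
      // (The patch instead sleeps for the elapsed time and resets the counter
      // only once a full minute has passed since startDate.)
      await new Promise((resolve) => setTimeout(resolve, 60_000 - elapsed));
      this.windowStart = Date.now();
      this.tokensUsed = 0;
    }

    this.tokensUsed += estimatedTokens;
  }
}

// Usage inside a request loop, charging each call at the provider's upper
// bound the way the patch does with getDefaultMaxToken():
//   const limiter = new TokenRateLimiter(60000);
//   await limiter.reserve(provider.getDefaultMaxToken());
//   const res = await provider.generateCompletion(prompt, max_tokens);

Charging each request at the provider's `MAX_TOKEN_COUNT` is a conservative upper bound on actual usage, which trades some throughput for a simple guarantee that the per-minute quota is not exceeded.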