From 64e1e4e577730f7e871870e30487ae51fdc26fa7 Mon Sep 17 00:00:00 2001
From: "adamlui@protonmail.com"
Date: Tue, 14 Jan 2025 04:13:02 -0800
Subject: [PATCH] Fixed GPTforLove API stopped working

---
 amazongpt/greasemonkey/amazongpt.user.js     | 58 ++++++++++----------
 bravegpt/greasemonkey/bravegpt.user.js       | 58 ++++++++++----------
 duckduckgpt/greasemonkey/duckduckgpt.user.js | 58 ++++++++++----------
 googlegpt/greasemonkey/googlegpt.user.js     | 58 ++++++++++----------
 4 files changed, 112 insertions(+), 120 deletions(-)

diff --git a/amazongpt/greasemonkey/amazongpt.user.js b/amazongpt/greasemonkey/amazongpt.user.js
index eb876fa69..0200cd9fa 100644
--- a/amazongpt/greasemonkey/amazongpt.user.js
+++ b/amazongpt/greasemonkey/amazongpt.user.js
@@ -3,7 +3,7 @@
 // @description Adds the magic of AI to Amazon shopping
 // @author KudoAI
 // @namespace https://kudoai.com
-// @version 2025.1.13.5
+// @version 2025.1.14
 // @license MIT
 // @icon https://amazongpt.kudoai.com/assets/images/icons/amazongpt/black-gold-teal/icon48.png?v=0fddfc7
 // @icon64 https://amazongpt.kudoai.com/assets/images/icons/amazongpt/black-gold-teal/icon64.png?v=0fddfc7
@@ -413,7 +413,7 @@
         'GPTforLove': {
             endpoint: 'https://api11.gptforlove.com/chat-process',
             expectedOrigin: {
-                url: 'https://ai27.gptforlove.com',
+                url: 'https://ai28.gptforlove.com',
                 headers: { 'Accept': 'application/json, text/plain, */*',
                            'Priority': 'u=0',
                            'Sec-Fetch-Site': 'same-site' }
@@ -2695,34 +2695,32 @@
                         chunk = extractedChunks.join('')
                     }
                     accumulatedChunks = apis[caller.api].accumulatesText ? chunk : accumulatedChunks + chunk
-                if (/^(?:\{|event:)/.test(accumulatedChunks)) api.tryNew(caller)
-                else {
-                    try { // to show stream text
-                        let textToShow = ''
-                        if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
-                            const jsonLines = accumulatedChunks.split('\n'),
-                                  nowResult = JSON.parse(jsonLines[jsonLines.length - 1])
-                            if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
-                            textToShow = nowResult.text
-                        } else textToShow = accumulatedChunks
-                        const failMatch = failFlagsAndURLs.exec(textToShow)
-                        if (failMatch) {
-                            log.debug('Response text', textToShow)
-                            log.error('Fail flag detected', `'${failMatch[0]}'`)
-                            if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
-                            return
-                        } else if (caller.status != 'done') { // app waiting or sending
-                            if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
-                            if (caller.sender == caller.api // app is sending from this caller.api
-                                && textToShow.trim() != '' // empty chunk not read
-                            ) show.reply(textToShow)
-                        }
-                    } catch (err) { log.error('Error showing stream', err.message) }
-                    return reader.read().then(({ done, value }) => {
-                        if (caller.sender == caller.api) // am designated sender, recurse
-                            processStreamText({ done, value })
-                    }).catch(err => log.error('Error reading stream', err.message))
-                }
+                try { // to show stream text
+                    let textToShow = ''
+                    if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
+                        const jsonLines = accumulatedChunks.split('\n'),
+                              nowResult = JSON.parse(jsonLines[jsonLines.length -1])
+                        if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
+                        textToShow = nowResult.text // for AI response
+                                  || JSON.stringify(nowResult) // for error response
+                    } else textToShow = accumulatedChunks
+                    const failMatch = failFlagsAndURLs.exec(textToShow)
+                    if (failMatch) {
+                        log.debug('Response text', textToShow)
+                        log.error('Fail flag detected', `'${failMatch[0]}'`)
+                        if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
+                        return
+                    } else if (caller.status != 'done') { // app waiting or sending
+                        if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
+                        if (caller.sender == caller.api // app is sending from this caller.api
+                            && textToShow.trim() != '' // empty chunk not read
+                        ) show.reply(textToShow)
+                    }
+                } catch (err) { log.error('Error showing stream', err.message) }
+                return reader.read().then(({ done, value }) => {
+                    if (caller.sender == caller.api) // am designated sender, recurse
+                        processStreamText({ done, value })
+                }).catch(err => log.error('Error reading stream', err.message))
             }
         },
 
diff --git a/bravegpt/greasemonkey/bravegpt.user.js b/bravegpt/greasemonkey/bravegpt.user.js
index 8d6b4353e..63afc630d 100644
--- a/bravegpt/greasemonkey/bravegpt.user.js
+++ b/bravegpt/greasemonkey/bravegpt.user.js
@@ -148,7 +148,7 @@
 // @description:zu Yengeza izimpendulo ze-AI ku-Brave Search (inikwa amandla yi-GPT-4o!)
 // @author KudoAI
 // @namespace https://kudoai.com
-// @version 2025.1.13.6
+// @version 2025.1.14
 // @license MIT
 // @icon https://assets.bravegpt.com/images/icons/bravegpt/icon48.png?v=df624b0
 // @icon64 https://assets.bravegpt.com/images/icons/bravegpt/icon64.png?v=df624b0
@@ -587,7 +587,7 @@
         'GPTforLove': {
             endpoint: 'https://api11.gptforlove.com/chat-process',
             expectedOrigin: {
-                url: 'https://ai27.gptforlove.com',
+                url: 'https://ai28.gptforlove.com',
                 headers: { 'Accept': 'application/json, text/plain, */*',
                            'Priority': 'u=0',
                            'Sec-Fetch-Site': 'same-site' }
@@ -3412,34 +3412,32 @@
                         chunk = extractedChunks.join('')
                     }
                     accumulatedChunks = apis[caller.api].accumulatesText ? chunk : accumulatedChunks + chunk
-                if (/^(?:\{|event:)/.test(accumulatedChunks)) api.tryNew(caller)
-                else {
-                    try { // to show stream text
-                        let textToShow = ''
-                        if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
-                            const jsonLines = accumulatedChunks.split('\n'),
-                                  nowResult = JSON.parse(jsonLines[jsonLines.length - 1])
-                            if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
-                            textToShow = nowResult.text
-                        } else textToShow = accumulatedChunks
-                        const failMatch = failFlagsAndURLs.exec(textToShow)
-                        if (failMatch) {
-                            log.debug('Response text', textToShow)
-                            log.error('Fail flag detected', `'${failMatch[0]}'`)
-                            if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
-                            return
-                        } else if (caller.status != 'done') { // app waiting or sending
-                            if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
-                            if (caller.sender == caller.api // app is sending from this caller.api
-                                && textToShow.trim() != '' // empty chunk not read
-                            ) show.reply(textToShow, footerContent)
-                        }
-                    } catch (err) { log.error('Error showing stream', err.message) }
-                    return reader.read().then(({ done, value }) => {
-                        if (caller.sender == caller.api) // am designated sender, recurse
-                            processStreamText({ done, value })
-                    }).catch(err => log.error('Error reading stream', err.message))
-                }
+                try { // to show stream text
+                    let textToShow = ''
+                    if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
+                        const jsonLines = accumulatedChunks.split('\n'),
+                              nowResult = JSON.parse(jsonLines[jsonLines.length -1])
+                        if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
+                        textToShow = nowResult.text // for AI response
+                                  || JSON.stringify(nowResult) // for error response
+                    } else textToShow = accumulatedChunks
+                    const failMatch = failFlagsAndURLs.exec(textToShow)
+                    if (failMatch) {
+                        log.debug('Response text', textToShow)
+                        log.error('Fail flag detected', `'${failMatch[0]}'`)
+                        if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
+                        return
+                    } else if (caller.status != 'done') { // app waiting or sending
+                        if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
+                        if (caller.sender == caller.api // app is sending from this caller.api
+                            && textToShow.trim() != '' // empty chunk not read
+                        ) show.reply(textToShow)
+                    }
+                } catch (err) { log.error('Error showing stream', err.message) }
+                return reader.read().then(({ done, value }) => {
+                    if (caller.sender == caller.api) // am designated sender, recurse
+                        processStreamText({ done, value })
+                }).catch(err => log.error('Error reading stream', err.message))
             }
         },
 
diff --git a/duckduckgpt/greasemonkey/duckduckgpt.user.js b/duckduckgpt/greasemonkey/duckduckgpt.user.js
index 3bb204563..e4061fe2e 100644
--- a/duckduckgpt/greasemonkey/duckduckgpt.user.js
+++ b/duckduckgpt/greasemonkey/duckduckgpt.user.js
@@ -148,7 +148,7 @@
 // @description:zu Yengeza izimpendulo ze-AI ku-DuckDuckGo (inikwa amandla yi-GPT-4o!)
 // @author KudoAI
 // @namespace https://kudoai.com
-// @version 2025.1.13.6
+// @version 2025.1.14
 // @license MIT
 // @icon https://assets.ddgpt.com/images/icons/duckduckgpt/icon48.png?v=06af076
 // @icon64 https://assets.ddgpt.com/images/icons/duckduckgpt/icon64.png?v=06af076
@@ -595,7 +595,7 @@
         'GPTforLove': {
             endpoint: 'https://api11.gptforlove.com/chat-process',
             expectedOrigin: {
-                url: 'https://ai27.gptforlove.com',
+                url: 'https://ai28.gptforlove.com',
                 headers: { 'Accept': 'application/json, text/plain, */*',
                            'Priority': 'u=0',
                            'Sec-Fetch-Site': 'same-site' }
@@ -3296,34 +3296,32 @@
                         chunk = extractedChunks.join('')
                     }
                     accumulatedChunks = apis[caller.api].accumulatesText ? chunk : accumulatedChunks + chunk
-                if (/^(?:\{|event:)/.test(accumulatedChunks)) api.tryNew(caller)
-                else {
-                    try { // to show stream text
-                        let textToShow = ''
-                        if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
-                            const jsonLines = accumulatedChunks.split('\n'),
-                                  nowResult = JSON.parse(jsonLines[jsonLines.length - 1])
-                            if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
-                            textToShow = nowResult.text
-                        } else textToShow = accumulatedChunks
-                        const failMatch = failFlagsAndURLs.exec(textToShow)
-                        if (failMatch) {
-                            log.debug('Response text', textToShow)
-                            log.error('Fail flag detected', `'${failMatch[0]}'`)
-                            if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
-                            return
-                        } else if (caller.status != 'done') { // app waiting or sending
-                            if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
-                            if (caller.sender == caller.api // app is sending from this caller.api
-                                && textToShow.trim() != '' // empty chunk not read
-                            ) show.reply(textToShow)
-                        }
-                    } catch (err) { log.error('Error showing stream', err.message) }
-                    return reader.read().then(({ done, value }) => {
-                        if (caller.sender == caller.api) // am designated sender, recurse
-                            processStreamText({ done, value })
-                    }).catch(err => log.error('Error reading stream', err.message))
-                }
+                try { // to show stream text
+                    let textToShow = ''
+                    if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
+                        const jsonLines = accumulatedChunks.split('\n'),
+                              nowResult = JSON.parse(jsonLines[jsonLines.length -1])
+                        if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
+                        textToShow = nowResult.text // for AI response
+                                  || JSON.stringify(nowResult) // for error response
+                    } else textToShow = accumulatedChunks
+                    const failMatch = failFlagsAndURLs.exec(textToShow)
+                    if (failMatch) {
+                        log.debug('Response text', textToShow)
+                        log.error('Fail flag detected', `'${failMatch[0]}'`)
+                        if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
+                        return
+                    } else if (caller.status != 'done') { // app waiting or sending
+                        if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
+                        if (caller.sender == caller.api // app is sending from this caller.api
+                            && textToShow.trim() != '' // empty chunk not read
+                        ) show.reply(textToShow)
+                    }
+                } catch (err) { log.error('Error showing stream', err.message) }
+                return reader.read().then(({ done, value }) => {
+                    if (caller.sender == caller.api) // am designated sender, recurse
+                        processStreamText({ done, value })
+                }).catch(err => log.error('Error reading stream', err.message))
             }
         },
 
diff --git a/googlegpt/greasemonkey/googlegpt.user.js b/googlegpt/greasemonkey/googlegpt.user.js
index bdd09a3b0..2473bfd64 100644
--- a/googlegpt/greasemonkey/googlegpt.user.js
+++ b/googlegpt/greasemonkey/googlegpt.user.js
@@ -149,7 +149,7 @@
 // @description:zu Yengeza izimpendulo ze-AI ku-Google Search (inikwa amandla yi-Google Gemma + GPT-4o!)
 // @author KudoAI
 // @namespace https://kudoai.com
-// @version 2025.1.13.6
+// @version 2025.1.14
 // @license MIT
 // @icon https://assets.googlegpt.io/images/icons/googlegpt/black/icon48.png?v=59409b2
 // @icon64 https://assets.googlegpt.io/images/icons/googlegpt/black/icon64.png?v=59409b2
@@ -777,7 +777,7 @@
         'GPTforLove': {
             endpoint: 'https://api11.gptforlove.com/chat-process',
             expectedOrigin: {
-                url: 'https://ai27.gptforlove.com',
+                url: 'https://ai28.gptforlove.com',
                 headers: { 'Accept': 'application/json, text/plain, */*',
                            'Priority': 'u=0',
                            'Sec-Fetch-Site': 'same-site' }
@@ -3633,34 +3633,32 @@
                         chunk = extractedChunks.join('')
                     }
                     accumulatedChunks = apis[caller.api].accumulatesText ? chunk : accumulatedChunks + chunk
-                if (/^(?:\{|event:)/.test(accumulatedChunks)) api.tryNew(caller)
-                else {
-                    try { // to show stream text
-                        let textToShow = ''
-                        if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
-                            const jsonLines = accumulatedChunks.split('\n'),
-                                  nowResult = JSON.parse(jsonLines[jsonLines.length - 1])
-                            if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
-                            textToShow = nowResult.text
-                        } else textToShow = accumulatedChunks
-                        const failMatch = failFlagsAndURLs.exec(textToShow)
-                        if (failMatch) {
-                            log.debug('Response text', textToShow)
-                            log.error('Fail flag detected', `'${failMatch[0]}'`)
-                            if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
-                            return
-                        } else if (caller.status != 'done') { // app waiting or sending
-                            if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
-                            if (caller.sender == caller.api // app is sending from this caller.api
-                                && textToShow.trim() != '' // empty chunk not read
-                            ) show.reply(textToShow, footerContent)
-                        }
-                    } catch (err) { log.error('Error showing stream', err.message) }
-                    return reader.read().then(({ done, value }) => {
-                        if (caller.sender == caller.api) // am designated sender, recurse
-                            processStreamText({ done, value })
-                    }).catch(err => log.error('Error reading stream', err.message))
-                }
+                try { // to show stream text
+                    let textToShow = ''
+                    if (caller.api == 'GPTforLove') { // extract parentID + latest chunk text
+                        const jsonLines = accumulatedChunks.split('\n'),
+                              nowResult = JSON.parse(jsonLines[jsonLines.length -1])
+                        if (nowResult.id) apis.GPTforLove.parentID = nowResult.id // for contextual replies
+                        textToShow = nowResult.text // for AI response
+                                  || JSON.stringify(nowResult) // for error response
+                    } else textToShow = accumulatedChunks
+                    const failMatch = failFlagsAndURLs.exec(textToShow)
+                    if (failMatch) {
+                        log.debug('Response text', textToShow)
+                        log.error('Fail flag detected', `'${failMatch[0]}'`)
+                        if (caller.status != 'done' && !caller.sender) api.tryNew(caller)
+                        return
+                    } else if (caller.status != 'done') { // app waiting or sending
+                        if (!caller.sender) caller.sender = caller.api // app is waiting, become sender
+                        if (caller.sender == caller.api // app is sending from this caller.api
+                            && textToShow.trim() != '' // empty chunk not read
+                        ) show.reply(textToShow)
+                    }
+                } catch (err) { log.error('Error showing stream', err.message) }
+                return reader.read().then(({ done, value }) => {
+                    if (caller.sender == caller.api) // am designated sender, recurse
+                        processStreamText({ done, value })
+                }).catch(err => log.error('Error reading stream', err.message))
             }
         },
 