From c58e7e39dee1ec8dff105d187be74e19eb1ab2cc Mon Sep 17 00:00:00 2001 From: Nick Hale <4175918+njhale@users.noreply.github.com> Date: Thu, 30 Jan 2025 23:25:05 -0500 Subject: [PATCH] enhance: optimistic search page loading and improved content refinement Signed-off-by: Nick Hale <4175918+njhale@users.noreply.github.com> --- google/search/package-lock.json | 29 ++++- google/search/package.json | 2 + google/search/src/headers.ts | 38 ++++++ google/search/src/refine.ts | 218 +++++++++++++++++++++++--------- google/search/src/search.ts | 215 +++++++++++++++++++++---------- google/search/src/server.ts | 38 +++--- google/search/src/session.ts | 26 ---- google/search/tool.gpt | 4 +- 8 files changed, 395 insertions(+), 175 deletions(-) create mode 100644 google/search/src/headers.ts diff --git a/google/search/package-lock.json b/google/search/package-lock.json index 205d57c6..1905bf9f 100644 --- a/google/search/package-lock.json +++ b/google/search/package-lock.json @@ -18,6 +18,8 @@ "express": "^4.18.2", "global-cache-dir": "^6.0.0", "playwright": "^1.46.0", + "prettier": "^3.4.2", + "tiktoken": "^1.0.18", "ts-node-dev": "^2.0.0", "turndown": "^7.1.3" }, @@ -132,9 +134,9 @@ } }, "node_modules/@gptscript-ai/gptscript": { - "version": "0.9.5", - "resolved": "https://registry.npmjs.org/@gptscript-ai/gptscript/-/gptscript-0.9.5.tgz", - "integrity": "sha512-7tzZJ2ZU5oXCNm2/4DkfvoqXMbGC9CNGpFzo5nI8ppZkFstcGUmGvvfrIEWYOnlJ3wx9yP9myTv9LmIfY8Tfog==", + "version": "v0.9.5", + "resolved": "git+ssh://git@github.com/gptscript-ai/node-gptscript.git#306861b8b5aea2ea0481c180067a1687d2726530", + "integrity": "sha512-BqOLYbkQzTtPHH4/A7/Foyt6jnSnZip9O9A7CUsBWd6JQTZxe30zZzQ+eWrBpKNlws7rnCPZZgS5qACUpkUelQ==", "hasInstallScript": true, "license": "Apache-2.0", "dependencies": { @@ -4044,6 +4046,21 @@ "node": ">= 0.8.0" } }, + "node_modules/prettier": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.4.2.tgz", + "integrity": "sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ==", + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -4640,6 +4657,12 @@ "dev": true, "license": "MIT" }, + "node_modules/tiktoken": { + "version": "1.0.18", + "resolved": "https://registry.npmjs.org/tiktoken/-/tiktoken-1.0.18.tgz", + "integrity": "sha512-DXJesdYwmBHtkmz1sji+UMZ4AOEE8F7Uw/PS/uy0XfkKOzZC4vXkYXHMYyDT+grdflvF4bggtPt9cYaqOMslBw==", + "license": "MIT" + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", diff --git a/google/search/package.json b/google/search/package.json index 40f92d74..0b9ecc20 100644 --- a/google/search/package.json +++ b/google/search/package.json @@ -31,6 +31,8 @@ "express": "^4.18.2", "global-cache-dir": "^6.0.0", "playwright": "^1.46.0", + "prettier": "^3.4.2", + "tiktoken": "^1.0.18", "ts-node-dev": "^2.0.0", "turndown": "^7.1.3" } diff --git a/google/search/src/headers.ts b/google/search/src/headers.ts new file mode 100644 index 00000000..da4ca91a --- /dev/null +++ b/google/search/src/headers.ts @@ -0,0 +1,38 @@ +import { type IncomingHttpHeaders } from 'node:http' +import { createHash } from 'node:crypto' + +export interface ModelProviderCredentials { + baseUrl: string + apiKey: 
string +} + +export function getModelProviderCredentials(headers: IncomingHttpHeaders): ModelProviderCredentials | undefined { + const baseUrl = getGPTScriptEnv(headers, 'OPENAI_BASE_URL')?.trim() + if (!baseUrl) return undefined + + const apiKey = getGPTScriptEnv(headers, 'OPENAI_API_KEY')?.trim() + if (!apiKey) return undefined + + return { baseUrl, apiKey } +} + +export function getSessionId(headers: IncomingHttpHeaders): string { + const workspaceId = getGPTScriptEnv(headers, 'GPTSCRIPT_WORKSPACE_ID') + if (!workspaceId?.trim()) throw new Error('No GPTScript workspace ID provided') + + return createHash('sha256').update(workspaceId).digest('hex').substring(0, 16) +} + +export function getGPTScriptEnv(headers: IncomingHttpHeaders, envKey: string): string | undefined { + const envHeader = headers?.['x-gptscript-env'] + const envArray = Array.isArray(envHeader) ? envHeader : [envHeader] + + for (const env of envArray) { + if (env == null) continue + for (const pair of env.split(',')) { + const [key, value] = pair.split('=').map((part) => part.trim()) + if (key === envKey) return value + } + } + return undefined +} \ No newline at end of file diff --git a/google/search/src/refine.ts b/google/search/src/refine.ts index 23e4883d..d8dab1a7 100644 --- a/google/search/src/refine.ts +++ b/google/search/src/refine.ts @@ -1,39 +1,139 @@ +import { encoding_for_model } from "tiktoken" import {GPTScript, type ToolDef} from "@gptscript-ai/gptscript" import {type SearchResult, type SearchResults} from "./search.ts" +import {type ModelProviderCredentials} from "./headers.ts" + +// Max number of tokens in the search results +const MAX_RESULTS_TOKENS = 50000 const gptscript = new GPTScript() -export async function refine (unrefined: SearchResults): Promise { - const now = new Date().toISOString() - const refined = await Promise.all( +export async function refine (creds: ModelProviderCredentials | undefined, unrefined: SearchResults): Promise { + const totalUnrefinedTokens = tokenCount(unrefined.results.reduce((acc, result) => acc + result.content, '')) + if (totalUnrefinedTokens <= MAX_RESULTS_TOKENS) { + console.info(`Total tokens (${totalUnrefinedTokens}) are within the limit (${MAX_RESULTS_TOKENS}), skipping refinement`) + return unrefined + } + + if (!creds) { + console.warn('No model provider credentials provided, skipping refinement') + return unrefined + } + + console.info(`Total tokens (${totalUnrefinedTokens}) are above the limit (${MAX_RESULTS_TOKENS}), calling GPTScript to refine results`) + + const now = userDateTime() + let refined = await Promise.all( unrefined.results.map(async (result) => { - if (result.content?.length ?? 0 <= 10000) { - // Don't refine content that is 10k tokens or less - return result + const refinedContent = await refineContent(creds, now, unrefined.query, result) + const refinedTokens = tokenCount(refinedContent.content) + return { + ...result, + ...refinedContent, + refinedTokens } - - return await refineResult(now, unrefined.query, result) }) ) - return { - ...unrefined, - results: refined.filter(result => hasContent(result.content)) + const totalRefinedTokens = refined.reduce((sum, r) => sum + r.refinedTokens, 0) + if (totalRefinedTokens <= MAX_RESULTS_TOKENS) { + // If the refined tokens already fit the limit, return as is. 
+ return { query: unrefined.query, results: refined } + } + + // Filter zero score or zero tokens + refined = refined.filter(r => r.score > 0 && r.refinedTokens > 0) + + // Sort by "value density" = score / tokens (descending) + refined.sort((a, b) => (b.score / b.refinedTokens) - (a.score / a.refinedTokens)) + + const pruned: SearchResult[] = [] + let tokenBudget = MAX_RESULTS_TOKENS + + for (const r of refined) { + if (tokenBudget < 1) break + + if (r.refinedTokens >= tokenBudget) { + // If the result is too long, truncate it to fit the budget + const truncated = truncateContent(r.content, tokenBudget) + pruned.push({ + ...r, + content: truncated.content, + }) + + // Consume the tokens from the budget + tokenBudget -= truncated.tokenCount + continue + } + + // The entire result fits in the budget, so add it to the pruned results + pruned.push(r) + tokenBudget -= r.refinedTokens + } + + return { query: unrefined.query, results: pruned } +} + +function tokenCount (content?: string): number { + if (!content || content.length === 0) { + return 0 + } + + const enc = encoding_for_model('gpt-4o-mini'); + try { + return enc.encode(content).length; + } catch (e) { + console.warn('Error encoding content', e); + } finally { + // Free encoding resources when done + enc.free() + } + + return 0 +} + + +function truncateContent (content: string, maxTokens: number): { + content: string, + tokenCount: number +} { + const codec = encoding_for_model('gpt-4o-mini'); + try { + const tokens = codec.encode(content) + const truncated = tokens.slice(0, maxTokens) + return { + content: new TextDecoder().decode(truncated), + tokenCount: truncated.length + } + } finally { + codec.free() } } -function hasContent (content?: string | string[]): boolean { - return !(Array.isArray(content) ? content?.length === 0 : content?.trim() === '') + +function userDateTime (): string { + const tz = process.env.TIMEZONE || 'UTC'; + try { + new Intl.DateTimeFormat('en-US', { timeZone: tz }); + } catch { + return new Date().toLocaleString('en-US', { timeZone: 'UTC', timeZoneName: 'short' }); + } + return new Date().toLocaleString('en-US', { timeZone: tz, timeZoneName: 'short' }); } -async function refineResult ( + +async function refineContent ( + creds: ModelProviderCredentials, time: string, query: string, - result: SearchResult): Promise { + result: SearchResult): Promise<{ + content: string, + score: number + }> { const tool: ToolDef = { chat: false, - jsonResponse: true, + jsonResponse: false, modelName: process.env.OBOT_DEFAULT_LLM_MINI_MODEL ?? 
'gpt-4o-mini',
     temperature: 0.0,
     arguments: {
@@ -43,38 +143,54 @@ async function refineResult (
           type: 'string',
           description: 'Current date and time that the search was requested at'
         },
-        query: {
+        topic: {
           type: 'string',
-          description: 'query or subject matter to generate citations for'
+          description: 'Topic to extract excerpts for'
         },
         url: {
           type: 'string',
-          description: 'URL that the content was sourced from'
+          description: 'URL that the markdown content was sourced from'
         },
         content: {
           type: 'string',
-          description: 'Markdown content to cite'
+          description: 'Markdown document created by exporting an HTML web page to markdown'
         }
       },
-      required: ['query', 'url', 'content']
+      required: ['time', 'topic', 'url', 'content']
     },
     instructions: refineInstructions
   }

   const run = await gptscript.evaluate(tool, {
-    input: JSON.stringify({
-      query,
-      ...result,
-      time
+    BaseURL: creds.baseUrl,
+    APIKey: creds.apiKey,
+    input: minify({
+      time,
+      topic: query,
+      url: result.url,
+      content: result.content
     })
   })

-  return await run.json()
+  // Parse the output into a score and content
+  const output = await run.text()
+  const [firstLine, ...restLines] = output?.split('\n') ?? []
+  const score = Math.max(1, Math.min(10, parseInt(firstLine, 10))) || 0
+  const content = restLines.join('\n')
+
+  return { score, content }
 }

 // Note: Tools can't introspect their parameters schema, so we provide it in the instructions as well
 const refineInstructions = `
-Given an object with the following JSON schema:
+Do not respond with any additional dialog or commentary.
+
+You are a research assistant tasked with extracting excerpts from a markdown document that will
+be used as notes to conduct detailed research about a given topic.
+
+The document is the result of exporting an HTML webpage to markdown.
+
+When given an object with the following JSON schema:

 ${minify({
   type: 'object',
@@ -83,51 +199,35 @@ ${minify({
       type: 'string',
       description: 'Current date and time that the search was requested at'
     },
-    query: {
+    topic: {
       type: 'string',
-      description: 'Query or subject matter to generate citations for'
+      description: 'Topic to extract excerpts for'
     },
     url: {
       type: 'string',
-      description: 'URL that the content was sourced from'
+      description: 'URL that the markdown content was sourced from'
     },
     content: {
       type: 'string',
-      description: 'Markdown content to cite'
+      description: 'Markdown document created by exporting an HTML web page to markdown'
     }
   },
-  required: ['query', 'url', 'content', 'time']
+  required: ['time', 'topic', 'url', 'content']
 })}

-Select all markdown from \${CONTENT} containing information useful to cite when researching \${QUERY}.
-Selected markdown should contain the most useful and relevant information to \${QUERY} available in \${CONTENT}.
-Don't select markdown that is not helpful or related to \${QUERY}.
-
-Respond with a single object containing all of the selected markdown that adheres to the following JSON schema:
+Perform the following steps in order:
+1. Refine the markdown content by removing all:
+ - boilerplate and unintelligible text
+ - unrelated advertisements, links, and web page structure
+2. Select excerpts from the refined content that you think would make good notes for conducting detailed research about the topic
+3. Compose a concise markdown document containing the excerpts organized in descending order of importance to understanding the topic. Do not paraphrase, summarize, or reword the excerpts. The goal is to preserve as much of the original content as possible.
+4. Grade the corpus of excerpts as a whole based on how well it covers the topic on a scale of 0-10, where a high score indicates good coverage and a low score indicates little or no relevant information

-${minify({
-  type: 'object',
-  properties: {
-    url: {
-      type: 'string',
-      description: 'URL that the content was sourced from'
-    },
-    title: {
-      type: 'string',
-      description: 'Main title of the source content'
-    },
-    content: {
-      type: 'array',
-      description: 'Cleaned up markdown from the original content that can be cited to research the query',
-      items: {
-        type: 'string'
-      }
-    }
-  },
-  required: ['url', 'title', 'content']
-})}
+Afterwards, respond with the grade followed by the markdown document on a new line.

-Do not respond with any additional dialog or commentary.
+EXAMPLE
+5
+
 `

 function minify (obj: object): string {
diff --git a/google/search/src/search.ts b/google/search/src/search.ts
index 42e50809..3de05948 100644
--- a/google/search/src/search.ts
+++ b/google/search/src/search.ts
@@ -1,11 +1,12 @@
 import { type BrowserContext, type Page } from '@playwright/test'
-import * as cheerio from 'cheerio';
+import * as cheerio from 'cheerio'
 import TurndownService from 'turndown'
+import prettier from 'prettier'

 export interface SearchResult {
   url: string
-  title?: string
-  content?: string | string[]
+  content?: string
+  debug?: string[]
 }

 export interface SearchResults {
@@ -13,7 +14,8 @@
   results: SearchResult[]
 }

-export async function search (
+
+export async function search(
   context: BrowserContext,
   query: string,
   maxResults: number
@@ -27,15 +29,11 @@

   const foundURLs = new Set()
   const results: Array<Promise<SearchResult | null>> = []
-
-  const page = await context.newPage()
-  const pages = await Promise.all(
-    Array.from({ length: maxResults }, () => context.newPage())
-  )
+  const searchPage = await context.newPage()

   try {
-    await page.goto(searchUrl)
-    const content = await page.content()
+    await searchPage.goto(searchUrl)
+    const content = await searchPage.content()

     const $ = cheerio.load(content)
     const elements = $('#rso a[jsname]')
@@ -43,80 +41,157 @@
       if (results.length >= maxResults) return false

       const url = $(element).attr('href') ?? ''
-      if ((url !== '') && !url.includes('youtube.com/watch?v') && !foundURLs.has(url)) {
+      if (url && !url.includes('youtube.com/watch?v') && !foundURLs.has(url)) {
         foundURLs.add(url)
-        results.push(getMarkdown(pages[results.length], url).then(content => {
-          return (content !== '') ? { url, content } : null
-        }))
+        // Create a new page per result and process it concurrently.
+ results.push( + (async () => { + const childPage = await context.newPage() + try { + return await getMarkdown(childPage, url) + } finally { + await childPage.close().catch(() => {}) + } + })() + ) } }) - return { - query, - results: (await Promise.all(results)).filter(Boolean) as SearchResult[] - } + const resolvedResults = (await Promise.all(results)).filter(Boolean) as SearchResult[] + return { query, results: resolvedResults } } finally { - // Fire and forget page close so we can move on - void page.close() - void Promise.all(pages.map(async p => { await p.close() })) + await searchPage.close().catch(() => {}) } } -export async function getMarkdown (page: Page, url: string): Promise { - try { - await page.goto(url, { timeout: 1000 }) - await page.waitForLoadState('networkidle', { timeout: 1000 }) - } catch (e) { - console.warn('slow page:', url) - } - let content = '' - while (content === '') { - let fails = 0 - try { - content = await page.evaluate(() => document.documentElement.outerHTML) - } catch (e) { - fails++ - if (fails > 2) { - void page.close() - console.warn('rip:', url) - return '' // Page didn't load; just ignore. +export async function getMarkdown (page: Page, url: string): Promise { + const result: SearchResult = { url, debug: [] } + const pageContent = await stableContent(url, page, 500, 2000, 2000) + const $ = cheerio.load(pageContent) + + // Remove scripts, styles, and iframes outright. + $('noscript, script, style, iframe').remove() + + // Remove elements that are clearly ads based on class or id. + $('[class*="ad-"], [id*="ad-"], [class*="advert"], [id*="advert"], .ads, .sponsored').remove() + + // Remove header, footer, nav, and aside elements. + $('header, footer, nav, aside').remove() + + // Remove other known boilerplate selectors. + $('.sidebar, .advertisement, .promo, .related-content').remove() + + // Try common selectors in order of preference. 
+    let content = ''
+    const mainSelectors = [
+      'main',
+      'article',
+      '.content',
+      '.post-content',
+      '.entry-content',
+      '.main-content',
+      'body'
+    ]
+
+    for (const selector of mainSelectors) {
+      const section = $(selector)
+      if (!section.length) continue
+
+      let selected = ''
+      for (const el of section) {
+        selected += await toMarkdown($.html(el))
       }
-      await new Promise(resolve => setTimeout(resolve, 100)) // sleep 100ms
-    }
-  }
-  void page.close()
-
-  const $ = cheerio.load(content)
-
-  // Remove common elements that are not part of the page content
-  $('noscript, script, style, img, g, svg, iframe').remove();
-  $('header, footer, nav, aside').remove();
-  $('.sidebar, .advertisement, .promo, .related-content').remove();
-
-  let resp = ''
-  const turndownService = new TurndownService({
-    headingStyle: 'atx',
-    bulletListMarker: '-',
-  })
-
-  // Prioritize main content selectors, eventually falling back to the full body
-  const mainSelectors = ['main', 'article', '.content', '.post-content', '.entry-content', '.main-content', 'body'];
-  for (const selector of mainSelectors) {
-    if ($(selector).first().length < 1) {
-      continue;
+
+      if (selected.length < 200) {
+        result.debug?.push(`Selector ${selector} found but extracted too little content, skipping...`)
+        continue
+      }
+
+      content = selected
+      result.debug?.push(`Extracted ${content.length} characters with selector: ${selector}`)
+      break
     }
+    result.content = content

-    $(selector).each(function () {
-      resp += turndownService.turndown($.html(this))
-    })
-    break
+
+  return {
+    ...result,
+    content: content,
+  }
+}
+
+async function stableContent (
+  url: string,
+  page: Page,
+  quietPeriod = 500,
+  navigateTimeout = 2000,
+  stabilizeTimeout = 2000
+): Promise<string> {
+  try {
+    // Wait up to navigateTimeout ms to navigate to the result URL.
+    // Note: This handles redirects.
+    await page.goto(url, { timeout: navigateTimeout })
+  } catch (e) {
+    console.warn('Slow or failed page navigation:', url, e)
   }

-  return trunc(resp, 50000)
+  return await page.evaluate(
+    ({ quietPeriod, stabilizeTimeout }) => {
+      return new Promise<string>((resolve) => {
+        let quietTimer: number
+        const observer = new MutationObserver(() => {
+          clearTimeout(quietTimer)
+          quietTimer = window.setTimeout(() => {
+            observer.disconnect()
+            // Capture and return the content when stability is reached
+            resolve(document.documentElement.outerHTML)
+          }, quietPeriod)
+        })
+        observer.observe(document.body, {
+          childList: true,
+          subtree: true,
+          characterData: true
+        })
+        // Start a quiet timer in case no mutations occur
+        quietTimer = window.setTimeout(() => {
+          observer.disconnect()
+          resolve(document.documentElement.outerHTML)
+        }, quietPeriod)
+        // Fallback: resolve after stabilizeTimeout even if mutations continue
+        window.setTimeout(() => {
+          observer.disconnect()
+          resolve(document.documentElement.outerHTML)
+        }, stabilizeTimeout)
+      })
+    },
+    { quietPeriod, stabilizeTimeout }
+  )
 }

-function trunc (text: string, max: number): string {
-  return text.length > max ? text.slice(0, max) + '...' : text
+// Create a TurndownService instance with compact options
+const turndownService = new TurndownService({
+  headingStyle: 'atx', // One-line headings, e.g. 
"# Heading" + bulletListMarker: '-', // Use '-' for list items + codeBlockStyle: 'fenced', // Use fenced code blocks (```) + fence: '```', + emDelimiter: '*', // Use asterisk for emphasis + strongDelimiter: '**', // Use double asterisk for strong text + linkStyle: 'inlined', // User referenced style to reduce the number of links +}) + +// Configure Prettier to produce compact markdown +const prettierOptions: prettier.Options = { + parser: 'markdown', + printWidth: 9999, // Set very high to avoid wrapping lines + proseWrap: 'never', // Don't force wrapping of prose + tabWidth: 1, // Use a single space for indentation (minimum available) + useTabs: false, + trailingComma: 'none' +}; + +async function toMarkdown (html: string): Promise { + let md = turndownService.turndown(html) + md = await prettier.format(md, prettierOptions) + return md.replace(/\n{3,}/g, '\n\n').trim() } diff --git a/google/search/src/server.ts b/google/search/src/server.ts index 79b4172d..953a0cea 100644 --- a/google/search/src/server.ts +++ b/google/search/src/server.ts @@ -1,7 +1,8 @@ import bodyParser from 'body-parser' -import { getSessionId, SessionManager } from './session.ts' +import { getSessionId, getModelProviderCredentials } from './headers.ts' +import { SessionManager } from './session.ts' import express, { type Request, type Response, type RequestHandler } from 'express' -import { search } from './search.ts' +import { search, SearchResults } from './search.ts' import { refine } from './refine.ts' async function main (): Promise { @@ -32,29 +33,34 @@ async function main (): Promise { const maxResults = Number.isInteger(Number(data.maxResults)) ? parseInt(data.maxResults as string, 10) : 3 const query: string = data.query ?? '' const sessionID = getSessionId(req.headers) + let searchResults: SearchResults + let searchEnd: number await sessionManager.withSession(sessionID, async (browserContext) => { // Query Google and get the result pages as markdown - const searchResults = await search( + searchResults = await search( browserContext, query, maxResults ) - const searchEnd = performance.now() + searchEnd = performance.now() + }) - // Extract the relevant citations from the content of each page - const refinedResults = await refine(searchResults) - const refineEnd = performance.now() + // Extract the relevant citations from the content of each page + const refinedResults = await refine( + getModelProviderCredentials(req.headers), + searchResults! + ) + const refineEnd = performance.now() - res.status(200).send(JSON.stringify({ - duration: { - search: (searchEnd - responseStart) / 1000, - refine: (refineEnd - searchEnd) / 1000, - response: (refineEnd - responseStart) / 1000 - }, - ...refinedResults - })) - }) + res.status(200).send(JSON.stringify({ + duration: { + search: (searchEnd! - responseStart) / 1000, + refine: (refineEnd - searchEnd!) / 1000, + response: (refineEnd - responseStart) / 1000 + }, + ...refinedResults + })) } catch (error: unknown) { const msg = error instanceof Error ? 
error.message : String(error) // Send a 200 status code GPTScript will pass the error to the LLM diff --git a/google/search/src/session.ts b/google/search/src/session.ts index 8a107b45..628a77df 100644 --- a/google/search/src/session.ts +++ b/google/search/src/session.ts @@ -1,8 +1,6 @@ import fs from 'node:fs/promises' import os from 'node:os' import path from 'node:path' -import { type IncomingHttpHeaders } from 'node:http' -import { createHash } from 'node:crypto' import { type BrowserContext } from 'playwright' import { newBrowserContext } from './context.ts' import TTLCache from '@isaacs/ttlcache' @@ -171,27 +169,3 @@ export class SessionManager { } } -export function getSessionId(headers: IncomingHttpHeaders): string { - const workspaceId = getGPTScriptEnv(headers, 'GPTSCRIPT_WORKSPACE_ID') - if (workspaceId == null) throw new Error('No GPTScript workspace ID provided') - - return createHash('sha256').update(workspaceId).digest('hex').substring(0, 16) -} - -export function getWorkspaceId(headers: IncomingHttpHeaders): string | undefined { - return getGPTScriptEnv(headers, 'GPTSCRIPT_WORKSPACE_ID') -} - -export function getGPTScriptEnv(headers: IncomingHttpHeaders, envKey: string): string | undefined { - const envHeader = headers?.['x-gptscript-env'] - const envArray = Array.isArray(envHeader) ? envHeader : [envHeader] - - for (const env of envArray) { - if (env == null) continue - for (const pair of env.split(',')) { - const [key, value] = pair.split('=').map((part) => part.trim()) - if (key === envKey) return value - } - } - return undefined -} diff --git a/google/search/tool.gpt b/google/search/tool.gpt index 7fb6a6b8..2ef54e54 100644 --- a/google/search/tool.gpt +++ b/google/search/tool.gpt @@ -7,7 +7,8 @@ Share Tools: Search --- Name: Search Description: Search Google with a given query and return relevant information from the search results. Search with more maxResults if you need more information. -JSON Response: true +Metadata: noUserAuth: sys.model.provider.credential +Credential: sys.model.provider.credential Share Context: Current Date and Time from ../../time Tools: service Args: query: A question, statement, or topic to search with (required) @@ -17,6 +18,7 @@ Args: maxResults: The maximum number of search results to gather relevant inform --- Name: service +Metadata: requestedEnvVars: OPENAI_API_KEY,OPENAI_BASE_URL Metadata: index: false #!sys.daemon /usr/bin/env npm --prefix ${GPTSCRIPT_TOOL_DIR} run server
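
A minimal sketch of how the new helpers in google/search/src/headers.ts are expected to be exercised by the daemon; this is not part of the patch, and the header values below are illustrative only:

import { type IncomingHttpHeaders } from 'node:http'
import { getModelProviderCredentials, getSessionId } from './headers.ts'

// headers.ts parses the 'x-gptscript-env' header as comma-separated KEY=VALUE pairs
// (the values here are made up for illustration).
const headers: IncomingHttpHeaders = {
  'x-gptscript-env': 'OPENAI_BASE_URL=https://api.openai.com/v1,OPENAI_API_KEY=sk-example,GPTSCRIPT_WORKSPACE_ID=example-workspace'
}

// Yields { baseUrl, apiKey } when both values are present; otherwise undefined,
// in which case refine() logs a warning and returns the unrefined search results.
const creds = getModelProviderCredentials(headers)

// Derives a stable per-workspace session ID: the first 16 hex characters of sha256(workspace ID).
const sessionId = getSessionId(headers)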