diff --git a/.github/ISSUE_TEMPLATE/new-model-request.md b/.github/ISSUE_TEMPLATE/new-model-request.md
deleted file mode 100644
index 4b1c436f9..000000000
--- a/.github/ISSUE_TEMPLATE/new-model-request.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-name: New model request
-about: Request to support new model
-title: ''
-labels: new model request
-assignees: ''
-
----
-
-**Model name**
-
-**Link**
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1ef7d7896..ff3e758ff 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,74 +1,2 @@
 # Contributing
 If you want to contribute, open a PR, issue, or start a discussion on our [Discord](https://discord.gg/dSBY3ms2Qr).
-
-# 🤖 Adding a new model provider
-If you want to add a new model provider (like OpenAI or HuggingFace), complete the following steps and create a PR.
-
-When you add a provider, you can also add a specific model (like OpenAI's GPT-4) under that provider.
-
-Here is an [example of adding a new provider](./NEW_PROVIDER_EXAMPLE.md).
-
-## 1. Add the provider to **frontend**
-- Add the provider name to the `ModelProvider` enum in [state/model.ts](state/model.ts)
-- Add the provider and models template to the `modelTemplates` object in [state/model.ts](state/model.ts)
-  - `creds` and `args` defined in the `modelTemplates` are accessible on the backend in `get_model` under their exact names in the `config["args"]` object.
-- Add the provider's PNG icon image to [`public/`](public/open-ai.png) in a resolution bigger than 30x30 px.
-- Add the provider's icon path to the `iconPaths` object in [components/icons/ProviderIcon.tsx](components/icons/ProviderIcon.tsx)
-
-## 2. Add provider to **backend** ([api-service/models/base.py](api-service/models/base.py))
-- Add the provider name to the `ModelProvider` enum
-- Add the provider integration (implementing LangChain's `BaseLanguageModel`) to the `get_model` function. You can use an existing integration from LangChain or create a new integration from scratch.
-
-The new provider integrations should be placed in `api-service/models/providers/`.
-
-## Provider integrations
-We use [LangChain](https://github.com/hwchase17/langchain) under the hood, so if you are adding a new integration, you have to implement the `BaseLanguageModel` class. That means implementing the async `_acall` method, which calls the model with a prompt and returns the output, and also calling `self.callback_manager.on_llm_new_token` from inside `_acall` to stream the output.
-
-### **Using [LangChain](https://python.langchain.com/en/latest/modules/models/llms/integrations.html) integration**
-You can often use existing LangChain integrations to add new model providers to e2b with just a few modifications.
-
-[Here](api-service/models/providers/replicate.py) is an example of a modified [Replicate](https://replicate.com/) integration. We had to add an `_acall` method to support async execution and override `validate_environment` to prevent checking if the Replicate API key env var is set up, because we pass the env var via a normal parameter.
-
-If you are modifying an existing LangChain integration, add it to `api-service/models/providers/<provider-name>.py`.
-
-### **From scratch**
-You can follow [LangChain's guide](https://python.langchain.com/en/latest/modules/models/llms/examples/custom_llm.html) to implement the `LLM` class (it inherits from `BaseLanguageModel`).
-
-Here is an example of the implementation:
-
-```py
-from typing import List, Optional
-from langchain.llms.base import LLM
-
-class NewModelProviderWithStreaming(LLM):
-    temperature: float
-    new_provider_api_token: str
-
-    # You only need to implement the `_acall` method
-    async def _acall(self, prompt: str, stop: Optional[List[str]] = None) -> str:
-        # Call the model and get outputs
-        # You can use the `temperature` and `new_provider_api_token` args
-        outputs: List[str] = []  # placeholder: replace with the provider's API call
-        text = ""
-        for token in outputs:
-            text += token
-            if self.callback_manager.is_async:
-                await self.callback_manager.on_llm_new_token(
-                    token,
-                    verbose=self.verbose,
-                    # We explicitly flush the logs in the log queue because the calls to this model are not actually async, so they block.
-                    flush=True,
-                )
-            else:
-                self.callback_manager.on_llm_new_token(
-                    token,
-                    verbose=self.verbose,
-                )
-        return text
-```
-
-## 3. Test
-Test if the provider works by starting the app, selecting the provider and model in the "Model" sidebar menu, and trying to "Run" it.
-
-![](docs-assets/change-model.gif)
-
-Then add a screenshot of the agent's steps to the PR.
diff --git a/api-client/AgentConnection.ts b/api-client/AgentConnection.ts
deleted file mode 100644
index 17e5e8566..000000000
--- a/api-client/AgentConnection.ts
+++ /dev/null
@@ -1,112 +0,0 @@
-import { RpcWebSocketClient, IRpcNotification } from 'rpc-websocket-client'
-
-import { Log } from 'db/types'
-import { ModelConfig } from 'state/model'
-
-
-export interface Step {
-  output: string
-  logs: Log[]
-}
-
-export interface StepEdit {
-  stepIdx: number
-  output: string
-}
-
-export enum AgentRunState {
-  None,
-  Running,
-  Paused,
-}
-
-export interface Opts {
-  onSteps: (steps: Step[]) => void
-  onClose: () => void
-  onStateChange: (runState: AgentRunState) => void
-}
-
-export class AgentConnection {
-  private readonly rpc = new RpcWebSocketClient()
-  private readonly url: string
-
-  constructor(url: string, private readonly opts: Opts, private readonly projectID: string) {
-    this.rpc.onNotification.push(this.handleNotification.bind(this))
-    this.rpc.onClose(opts.onClose)
-    this.url = `${url.replace('http', 'ws')}?project_id=${this.projectID}`
-  }
-
-  async connect() {
-    await this.rpc.connect(this.url)
-  }
-
-  async disconnect() {
-    console.log('closing')
-    // This is the browser WebSocket way of closing connection
-    // TODO: Test this connection closing
-    this.rpc.ws?.close()
-  }
-
-  private async handleNotification(data: IRpcNotification) {
-    switch (data.method) {
-      case 'logs':
-        if (data.params.logs) {
-          this.opts.onSteps(data.params.logs)
-        }
-        break
-      case 'interaction_request':
-        if (data.params.type === 'done') {
-          this.opts.onStateChange(AgentRunState.None)
-          await this.disconnect()
-        } else {
-          console.error('Unhandled interaction request', data)
-        }
-        break
-      default:
-        console.error('Unknown notification method', data)
-        break
-    }
-  }
-
-  async start(config: ModelConfig & { templateID: string }, instructions: any) {
-    await this.rpc.call('start', { config, instructions })
-    this.opts.onStateChange(AgentRunState.Running)
-  }
-
-  private async interaction(type: string, data?: any) {
-    await this.rpc.call('interaction', { type, data })
-  }
-
-  async pauseRun() {
-    await this.interaction('pause')
-    this.opts.onStateChange(AgentRunState.Paused)
-  }
-
-  async resumeRun() {
-    await this.interaction('resume')
-    this.opts.onStateChange(AgentRunState.Running)
-  }
-
-  async cancelRun() {
-    await this.rpc.call('stop')
-    this.opts.onStateChange(AgentRunState.None)
-    await this.disconnect()
-  }
-
-  async rewriteRunSteps(steps: Step[]) {
-    await this.interaction('rewrite_steps', { steps })
-    this.opts.onStateChange(AgentRunState.Running)
-  }
-
-  static resolveStepsEdit(steps: Step[], edit: StepEdit): Step[] | undefined {
-    const step = steps[edit.stepIdx]
-    if (!step) {
-      throw new Error('Step does not exist')
-    }
-    if (step.output === edit.output) return
-    step.output = edit.output
-    step.logs = []
-
-    return steps.slice(0, edit.stepIdx + 1)
-  }
-}
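For review context: the deleted `AgentConnection` above is a thin JSON-RPC 2.0 client over a WebSocket (`rpc-websocket-client` handles the framing). Below is a rough, hypothetical Python sketch of the same wire protocol. The `start`, `logs`, and `interaction_request` names and the `?project_id=` query param are taken from the class above; the `websockets` dependency and the standalone-script shape are assumptions, not part of this PR.

```py
import asyncio
import json

import websockets  # assumed third-party dependency: pip install websockets


async def run_agent(url: str, project_id: str, config: dict, instructions: dict):
    # Mirrors the AgentConnection constructor: http(s) becomes ws(s),
    # and the project ID travels as a query parameter.
    ws_url = f"{url.replace('http', 'ws')}?project_id={project_id}"
    async with websockets.connect(ws_url) as ws:
        # JSON-RPC 2.0 request, equivalent to AgentConnection.start().
        await ws.send(json.dumps({
            "jsonrpc": "2.0",
            "id": 1,
            "method": "start",
            "params": {"config": config, "instructions": instructions},
        }))
        async for raw in ws:
            msg = json.loads(raw)
            # Notifications carry a method but no id (see handleNotification above).
            if msg.get("method") == "logs":
                print("steps:", msg["params"]["logs"])
            elif msg.get("method") == "interaction_request":
                if msg["params"].get("type") == "done":
                    break  # the run finished; AgentConnection disconnects here
```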
diff --git a/api-service/agent/smol_agent.py b/api-service/agent/smol_agent.py
index b31ce8883..a72aba9c9 100644
--- a/api-service/agent/smol_agent.py
+++ b/api-service/agent/smol_agent.py
@@ -4,6 +4,8 @@
 import ast
 from decimal import Decimal
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import BaseLanguageModel
 from typing import Any, Callable, List
 from langchain.callbacks.base import AsyncCallbackManager
 from langchain.schema import (
@@ -24,7 +26,6 @@
     OnLogs,
     OnInteractionRequest,
 )
-from models.base import ModelConfig, get_model
 from agent.base import AgentBase, AgentInteractionRequest, GetEnvs
 from session.playground import Playground
@@ -103,15 +104,22 @@ async def create(
         on_interaction_request: OnInteractionRequest,
     ):
         callback_manager = AsyncCallbackManager([])
-        new_config = ModelConfig(**config)
-        # Use default openai api key
-        new_config.args["openai_api_key"] = default_openai_api_key
-
-        model = get_model(new_config, callback_manager, streaming=False)
+        model: BaseLanguageModel = ChatOpenAI(
+            temperature=0,
+            max_tokens=6000,
+            model_name=model_version,
+            openai_api_key=default_openai_api_key,
+            request_timeout=3600,
+            verbose=True,
+            # The max time between retries is 1 minute, so we set max_retries to 45
+            max_retries=45,
+            streaming=False,
+            callback_manager=callback_manager,
+        )  # type: ignore
         return cls(
-            new_config,
+            config,
             get_envs,
             set_run_id,
             on_logs,
@@ -297,15 +305,6 @@ async def initialize_playground():
             res = await playground.run_command(delete_command, rootdir)
             print("Delete command result: ", res.stdout, res.stderr)
-            # await self.on_logs(
-            #     {
-            #         "type": "Filesystem",
-            #         "message": "",
-            #         "properties": {
-            #             "path": rootdir,
-            #         },
-            #     }
-            # )
             span.add_event(
                 "files-deleted",
                 {
diff --git a/api-service/database/database.py b/api-service/database/database.py
index d23f49d93..d5d008b32 100644
--- a/api-service/database/database.py
+++ b/api-service/database/database.py
@@ -1,7 +1,6 @@
 import json
 from typing import Any, List
-from agent.output.output_stream_parser import Step
 from database.client import Client
 from session.env import EnvVar
@@ -49,7 +48,7 @@ async def update_deployment_logs(
         deployment_id: str,
         run_id: str | None,
         project_id: str,
-        logs: List[Step],
+        logs: List[Any],
     ):
         if run_id is None:
             return
diff --git a/api-service/deployment/manager.py b/api-service/deployment/manager.py
index 7c3139790..a8052a315 100644
--- a/api-service/deployment/manager.py
+++ b/api-service/deployment/manager.py
@@ -2,13 +2,14 @@
 import uuid
 import aiohttp
 import json
-from datetime import datetime
-
-from agent.output.work_queue import WorkQueue
+from datetime import datetime
 from typing import Any, Callable, Coroutine, List
 from abc import abstractmethod, ABC
+from .work_queue import WorkQueue
+
+
 from database.base import db
 from agent.base import (
     AgentBase,
diff --git a/api-service/deployment/work_queue.py b/api-service/deployment/work_queue.py
new file mode 100644
index 000000000..29ed47a12
--- /dev/null
+++ b/api-service/deployment/work_queue.py
@@ -0,0 +1,48 @@
+from asyncio import Queue, ensure_future
+from typing import Any, Callable, Coroutine, Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class WorkQueue(Generic[T]):
+    """Queue that tries to always process only the most recently scheduled workload."""
+
+    def __init__(self, on_workload: Callable[[T], Coroutine[Any, Any, Any]]) -> None:
+        self._queue: Queue[Coroutine] = Queue()
+        self._on_workload = on_workload
+        # Start the background worker that processes scheduled workloads.
+        self._worker = ensure_future(self._start())
+
+    async def _work(self):
+        # Drop every queued workload except the newest one.
+        for _ in range(self._queue.qsize() - 1):
+            old_coro = self._queue.get_nowait()
+            try:
+                old_coro.close()
+            except Exception as e:
+                print(e)
+            finally:
+                self._queue.task_done()
+
+        # Process the newest workload, or wait until one is scheduled.
+        task = await self._queue.get()
+        try:
+            await ensure_future(task)
+        except Exception as e:
+            print(e)
+        finally:
+            self._queue.task_done()
+
+    async def _start(self):
+        while True:
+            await self._work()
+
+    async def flush(self):
+        await self._queue.join()
+
+    def schedule(self, workload: T):
+        task = self._on_workload(workload)
+        self._queue.put_nowait(task)
+
+    def close(self):
+        self._worker.cancel()
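A minimal usage sketch of the relocated `WorkQueue`, assuming `api-service` is on `PYTHONPATH` so `deployment.work_queue` imports. Scheduling three workloads back to back processes only the newest one: stale log snapshots are dropped instead of piling up behind slow writes.

```py
import asyncio

from deployment.work_queue import WorkQueue


async def save_logs(logs: str):
    # Stand-in for the real database write.
    await asyncio.sleep(0.1)
    print("saved:", logs)


async def main():
    # The worker task starts on the already-running event loop.
    queue = WorkQueue(on_workload=save_logs)

    # Each schedule() supersedes the previous one; only "logs v3" is saved.
    queue.schedule("logs v1")
    queue.schedule("logs v2")
    queue.schedule("logs v3")

    await queue.flush()  # wait until the queue is fully drained
    queue.close()


asyncio.run(main())
```

The superseded coroutines are closed by `_work`, not awaited, so the dropped writes never hit the database at all.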
diff --git a/api-service/models/__init__.py b/api-service/models/__init__.py
deleted file mode 100644
index 311f515a3..000000000
--- a/api-service/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .base import get_model, ModelConfig
diff --git a/api-service/models/base.py b/api-service/models/base.py
deleted file mode 100644
index 97736fbd5..000000000
--- a/api-service/models/base.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from typing import Dict, Any, List, Literal
-from enum import Enum
-from langchain.chat_models import ChatOpenAI
-from langchain.schema import BaseLanguageModel
-from langchain.callbacks.base import BaseCallbackManager
-from pydantic import BaseModel
-
-
-class ModelProvider(Enum):
-    OpenAI = "OpenAI"
-
-
-class PromptPart(BaseModel):
-    role: Literal["user", "system"]
-    type: str
-    content: str
-
-
-class ModelConfig(BaseModel):
-    # Provider is a string and not ModelProvider because we deserialize it from the request's JSON body
-    provider: str
-    prompt: List[PromptPart]
-    args: Dict[str, Any]
-
-
-def get_model(
-    config: ModelConfig,
-    callback_manager: BaseCallbackManager,
-    streaming=True,
-) -> BaseLanguageModel:
-    match config.provider:
-        case ModelProvider.OpenAI.value:
-            return ChatOpenAI(
-                **config.args,
-                request_timeout=3600,
-                verbose=True,
-                # The max time between retries is 1 minute, so we set max_retries to 45
-                max_retries=45,
-                streaming=streaming,
-                callback_manager=callback_manager,
-            )
-        case _:
-            raise ValueError(f"Provider {config.provider} not found.")
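For context on what this deletion removes: the `get_model` dispatch above was the extension point that the (also deleted) CONTRIBUTING.md guide pointed new providers at, one enum member plus one `match` case each. A hypothetical sketch of that pattern, with a simplified signature and `ChatAnthropic` purely as an illustrative stand-in, not something this PR adds:

```py
from enum import Enum

from langchain.callbacks.base import BaseCallbackManager
from langchain.chat_models import ChatAnthropic, ChatOpenAI
from langchain.schema import BaseLanguageModel


class ModelProvider(Enum):
    OpenAI = "OpenAI"
    Anthropic = "Anthropic"  # hypothetical new provider


def get_model(
    provider: str,
    args: dict,
    callback_manager: BaseCallbackManager,
    streaming=True,
) -> BaseLanguageModel:
    match provider:
        case ModelProvider.OpenAI.value:
            return ChatOpenAI(
                **args,
                streaming=streaming,
                callback_manager=callback_manager,
            )
        case ModelProvider.Anthropic.value:
            # Same pattern: pass the user-supplied args straight through.
            return ChatAnthropic(
                **args,
                streaming=streaming,
                callback_manager=callback_manager,
            )
        case _:
            raise ValueError(f"Provider {provider} not found.")
```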
diff --git a/components/AgentInstructions.tsx b/components/AgentInstructions.tsx
index 00cf2c264..ee41f714f 100644
--- a/components/AgentInstructions.tsx
+++ b/components/AgentInstructions.tsx
@@ -9,7 +9,7 @@ import {
 } from 'lucide-react'
 import smolTemplates from 'utils/smolTemplates'
-import InstructionsEditor, { InstructionsEditorRef } from 'components/Editor/Template/NodeJSExpressTemplate/InstructionsEditor'
+import InstructionsEditor, { InstructionsEditorRef } from 'components/InstructionsEditor'
 import InstructionsTemplateButton from 'components/InstructionsTemplateButton'
 import AlertError from 'components/AlertError'
 import { usePostHog } from 'posthog-js/react'
@@ -68,7 +68,7 @@
             Deploy Smol Developer
-          What do you want the AI agent to build?
+          What do you want the AI agent to build?
         {error && (
diff --git a/components/AuthForm.tsx b/components/AuthForm.tsx
deleted file mode 100644
--- a/components/AuthForm.tsx
+++ /dev/null
-  const router = useRouter()
-  const user = useUser()
-
-  const emailRef = useRef(null)
-  const passwordRef = useRef(null)
-
-  const authWithEmail = useCallback(async (email?: string, password?: string) => {
-    setIsLoading(true)
-
-    email = email || emailRef.current?.value
-    password = password || passwordRef.current?.value
-
-    if (!email) {
-      setErrMessage('Email must not be empty')
-      emailRef.current?.focus()
-      setIsLoading(false)
-      return
-    }
-
-    if (!password) {
-      passwordRef.current?.focus()
-      setErrMessage('Password must not be empty')
-      setIsLoading(false)
-      return
-    }
-
-    const { error, data: { user } } =
-      authType === AuthFormType.SignUp
-        ? await supabaseClient.auth.signUp({
-          email,
-          password,
-        })
-        : await supabaseClient.auth.signInWithPassword({
-          email,
-          password,
-        })
-
-    if (error) {
-      emailRef.current?.focus()
-      setErrMessage(error.message)
-      console.error(error)
-    } else {
-      setErrMessage('')
-      router.replace('/')
-    }
-
-    setIsLoading(false)
-  }, [supabaseClient, router, authType])
-
-  useEffect(function autoSignIn() {
-    if (process.env.NEXT_PUBLIC_SIGN_IN_EMAIL && process.env.NEXT_PUBLIC_SIGN_IN_PASSWORD) {
-      authWithEmail(process.env.NEXT_PUBLIC_SIGN_IN_EMAIL, process.env.NEXT_PUBLIC_SIGN_IN_PASSWORD)
-    }
-  }, [authWithEmail])
-
-  useEffect(
-    function autofocusEmailInput() {
-      if (isLoading) return
-
-      if (!emailRef.current?.value) {
-        emailRef.current?.focus()
-      } else if (!passwordRef.current?.value) {
-        passwordRef.current?.focus()
-      } else {
-        emailRef.current?.focus()
-      }
-    },
-    [isLoading],
-  )
-
-
-  const title = authType === AuthFormType.SignUp ? 'Create a new account' : 'Sign in'
-
-  const buttonLabel = authType === AuthFormType.SignUp ? 'Sign up' : 'Sign in'
-
-  const buttonLoadingLabel =
-    authType === AuthFormType.SignUp ? 'Signing up...' : 'Signing in...'
-
-  const passwordAutocomplete =
-    authType === AuthFormType.SignUp ? 'new-password' : 'current-password'
-
-  return (
-    {
-      e.preventDefault()
-      authWithEmail()
-    }}
-    >
-
- -
-
- - -
-
-
-
-
-
-  )
-}
-
-AuthForm.type = AuthFormType
-
-export default AuthForm
\ No newline at end of file
diff --git a/components/Button.tsx b/components/Button.tsx
deleted file mode 100644
index f8b928a55..000000000
--- a/components/Button.tsx
+++ /dev/null
@@ -1,81 +0,0 @@
-import clsx from 'clsx'
-import {
-  MouseEvent,
-  ReactNode,
-} from 'react'
-
-import Text from 'components/Text'
-
-export enum Variant {
-  Full,
-  Outline,
-  Uncolored,
-}
-
-export enum IconPosition {
-  Left,
-  Right,
-}
-
-export interface Props {
-  className?: string
-  text?: string
-  variant?: Variant
-  icon?: ReactNode
-  onClick?: (e: MouseEvent) => any
-  isDisabled?: boolean
-  type?: 'submit'
-  iconPosition?: IconPosition
-}
-
-function Button({
-  className,
-  text,
-  variant = Variant.Outline,
-  icon,
-  onClick,
-  type,
-  isDisabled,
-  iconPosition = IconPosition.Left,
-}: Props) {
-  return (
-
-  )
-}
-
-Button.variant = Variant
-Button.iconPosition = IconPosition
-
-
-export default Button
diff --git a/components/DeleteButton.tsx b/components/DeleteButton.tsx
deleted file mode 100644
index aaa022140..000000000
--- a/components/DeleteButton.tsx
+++ /dev/null
@@ -1,74 +0,0 @@
-import clsx from 'clsx'
-import { useState, useEffect, MouseEvent } from 'react'
-
-import Text from './Text'
-
-export interface Props {
-  onDelete: () => void
-}
-
-function DeleteButton({ onDelete }: Props) {
-  const [confirmDelete, setConfirmDelete] = useState(false)
-
-  useEffect(
-    function expireConfirm() {
-      if (confirmDelete) {
-        const cleanup = setTimeout(() => setConfirmDelete(false), 4000)
-        return () => {
-          clearTimeout(cleanup)
-        }
-      }
-    },
-    [confirmDelete],
-  )
-
-  function handleDelete(
-    e: MouseEvent,
-  ) {
-    e.stopPropagation()
-    e.preventDefault()
-
-    if (confirmDelete) {
-      try {
-        onDelete()
-      } catch (err) {
-        const msg = err instanceof Error ? err.message : String(err)
-        console.error(`Error deleting item: ${msg}`)
-      }
-    } else {
-      setConfirmDelete(true)
-    }
-  }
-
-  return (
-
-  )
-}
-
-export default DeleteButton
diff --git a/components/DeployAgent.tsx b/components/DeployAgent.tsx
index cb2f17404..1d382de24 100644
--- a/components/DeployAgent.tsx
+++ b/components/DeployAgent.tsx
@@ -4,7 +4,7 @@ import {
   ScrollText,
 } from 'lucide-react'
-import InstructionsEditor from 'components/Editor/Template/NodeJSExpressTemplate/InstructionsEditor'
+import InstructionsEditor from 'components/InstructionsEditor'
 import { RepoSetup } from 'utils/repoSetup'
 import SpinnerIcon from 'components/Spinner'
diff --git a/components/Editor/Sidebar/Agent/AgentRunControls.tsx b/components/Editor/Sidebar/Agent/AgentRunControls.tsx
deleted file mode 100644
index 44c4a6d14..000000000
--- a/components/Editor/Sidebar/Agent/AgentRunControls.tsx
+++ /dev/null
@@ -1,80 +0,0 @@
-import { ReactNode, memo, useMemo, useCallback } from 'react'
-import { Ban, Pause, Play } from 'lucide-react'
-
-import Button from 'components/Button'
-import { AgentConnection, AgentRunState } from 'api-client/AgentConnection'
-
-
-export interface Props {
-  agentState?: AgentRunState
-  run: () => void
-  agentRun?: AgentConnection
-  disabled?: boolean
-}
-
-function AgentRunControls({
-  agentRun,
-  disabled,
-  agentState,
-  run,
-}: Props) {
-  const cancel = useCallback(async () => {
-    await agentRun?.cancelRun()
-  }, [agentRun])
-
-  const pause = useCallback(async () => {
-    await agentRun?.pauseRun()
-  }, [agentRun])
-
-  const resume = useCallback(async () => {
-    await agentRun?.resumeRun()
-  }, [agentRun])
-
-  const { action, icon, text } = useMemo<{
-    text: string
-    icon: ReactNode | null
-    action: () => void
-  }>(() => {
-    switch (agentState) {
-      case AgentRunState.Running:
-        return {
-          text: 'Running',
-          icon: ,
-          action: pause,
-        }
-      case AgentRunState.Paused:
-        return {
-          text: 'Resume',
-          icon: ,
-          action: resume,
-        }
-      default:
-        return {
-          icon: ,
-          text: 'Run',
-          action: run,
-        }
-    }
-  }, [agentState, pause, run, resume])
-
-  return (
-    <>
-      {agentRun && agentState &&
-