WebSocket interface and basic server.py process (#399)
cpacker authored Nov 14, 2023
1 parent 80aa8c9 commit 05ffceb
Showing 15 changed files with 954 additions and 195 deletions.
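Only two of the 15 changed files are rendered below; they cover the call sites, not the new WebSocket interface or server.py themselves. As a rough sketch of the pattern these hunks imply, the agent is handed an "interface" object, so terminal printing (CLIInterface) and WebSocket streaming become interchangeable back ends. The AgentInterface/WebSocketInterface names and the method names are illustrative assumptions, not the API added by this commit:

import json
from abc import ABC, abstractmethod

class AgentInterface(ABC):
    """Hooks the agent calls whenever it has output to surface to the user."""

    @abstractmethod
    def internal_monologue(self, msg: str) -> None: ...

    @abstractmethod
    def assistant_message(self, msg: str) -> None: ...

class CLIInterface(AgentInterface):
    """Print agent output directly to the terminal."""

    def internal_monologue(self, msg: str) -> None:
        print(f"[inner thoughts] {msg}")

    def assistant_message(self, msg: str) -> None:
        print(f"[assistant] {msg}")

class WebSocketInterface(AgentInterface):
    """Serialize agent output as JSON frames for a connected WebSocket client."""

    def __init__(self, send):
        self.send = send  # any callable that writes one text frame, e.g. ws.send

    def internal_monologue(self, msg: str) -> None:
        self.send(json.dumps({"type": "internal_monologue", "message": msg}))

    def assistant_message(self, msg: str) -> None:
        self.send(json.dumps({"type": "assistant_message", "message": msg}))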
6 changes: 3 additions & 3 deletions memgpt/cli/cli.py
@@ -10,7 +10,7 @@
from llama_index import set_global_service_context
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext

-import memgpt.interface # for printing to terminal
+from memgpt.interface import CLIInterface as interface # for printing to terminal
from memgpt.cli.cli_config import configure
import memgpt.agent as agent
import memgpt.system as system
@@ -128,7 +128,7 @@ def run(
agent_config.save()

# load existing agent
-memgpt_agent = Agent.load_agent(memgpt.interface, agent_config)
+memgpt_agent = Agent.load_agent(interface, agent_config)
else: # create new agent
# create new agent config: override defaults with args if provided
typer.secho("Creating new agent...", fg=typer.colors.GREEN)
@@ -158,7 +158,7 @@ def run(
agent_config.model,
utils.get_persona_text(agent_config.persona),
utils.get_human_text(agent_config.human),
-memgpt.interface,
+interface,
persistence_manager,
)

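The cli.py hunks above are mechanical: the module object memgpt.interface used to be passed straight into Agent.load_agent and the agent constructor; now the CLIInterface class is imported under the same local name, so only the argument changes. A toy continuation of the sketch above showing why that injection point matters (DummyAgent is a stand-in for illustration, not memgpt.agent.Agent):

class DummyAgent:
    """Stand-in agent; only shows where the injected interface is used."""

    def __init__(self, interface):
        self.interface = interface  # injected, never hard-coded to the terminal

    def step(self, user_text: str) -> None:
        self.interface.internal_monologue(f"user said {user_text!r}")
        self.interface.assistant_message("Hello!")

DummyAgent(CLIInterface()).step("hi")             # prints to the terminal
DummyAgent(WebSocketInterface(print)).step("hi")  # emits JSON frames (print stands in for ws.send)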
6 changes: 4 additions & 2 deletions memgpt/config.py
@@ -17,7 +17,7 @@
from typing import List, Type

import memgpt.utils as utils
-import memgpt.interface as interface
+from memgpt.interface import CLIInterface as interface
from memgpt.personas.personas import get_persona_text
from memgpt.humans.humans import get_human_text
from memgpt.constants import MEMGPT_DIR, LLM_MAX_TOKENS
@@ -109,7 +109,9 @@ def load(cls) -> "MemGPTConfig":
# read config values
model = config.get("defaults", "model")
context_window = (
config.get("defaults", "context_window") if config.has_option("defaults", "context_window") else LLM_MAX_TOKENS["DEFAULT"]
int(config.get("defaults", "context_window"))
if config.has_option("defaults", "context_window")
else LLM_MAX_TOKENS["DEFAULT"]
)
preset = config.get("defaults", "preset")
model_endpoint = config.get("defaults", "model_endpoint")
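The config.py change is a type fix: configparser returns every option as a string, so a context_window read from the config file would otherwise flow downstream as "8192" rather than 8192, while the LLM_MAX_TOKENS["DEFAULT"] fallback is already an int. A self-contained illustration (LLM_MAX_TOKENS here is a stand-in for memgpt.constants):

from configparser import ConfigParser

LLM_MAX_TOKENS = {"DEFAULT": 8192}  # stand-in for the constant in memgpt.constants

config = ConfigParser()
config.read_string("[defaults]\ncontext_window = 8192\n")

raw = config.get("defaults", "context_window")
print(type(raw), raw)  # <class 'str'> 8192 -- configparser hands back text

context_window = (
    int(config.get("defaults", "context_window"))
    if config.has_option("defaults", "context_window")
    else LLM_MAX_TOKENS["DEFAULT"]
)
print(type(context_window), context_window)  # <class 'int'> 8192

configparser's own config.getint("defaults", "context_window", fallback=LLM_MAX_TOKENS["DEFAULT"]) would be a terser equivalent of the same guard.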
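The remaining 13 files, including the WebSocket interface and the basic server.py process named in the commit title, are not rendered above. Purely as a hypothetical sketch of how such a process could pair with a WebSocket-backed interface, and not the code added by this commit, a minimal server using the third-party websockets package might look like:

import asyncio
import json
import websockets  # third-party: pip install websockets

async def handle_client(websocket):
    # One connection = one conversation: read a user message, stream agent-style
    # output back as JSON frames (faked here; a real server would drive the MemGPT
    # agent through a WebSocket-backed interface object).
    async for raw in websocket:
        request = json.loads(raw)
        await websocket.send(json.dumps(
            {"type": "internal_monologue",
             "message": f"thinking about {request.get('message', '')!r}"}))
        await websocket.send(json.dumps(
            {"type": "assistant_message", "message": "Hello from the server sketch."}))

async def main():
    async with websockets.serve(handle_client, "localhost", 8765):
        await asyncio.Future()  # run until cancelled

if __name__ == "__main__":
    asyncio.run(main())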
