WIP register_models() plugin hook, refs #53
simonw committed Jul 10, 2023
1 parent 747a6c3 commit aed5f29
Showing 6 changed files with 245 additions and 42 deletions.
1 change: 1 addition & 0 deletions llm/__init__.py
@@ -3,6 +3,7 @@
from typing import Optional
from .hookspecs import hookimpl # noqa
from .hookspecs import hookspec # noqa
from .models import Model, Prompt, Response, OptionsError # noqa


class Template(BaseModel):
115 changes: 73 additions & 42 deletions llm/cli.py
@@ -4,7 +4,7 @@
import json
from llm import Template
from .migrations import migrate
from .plugins import pm, get_plugins
from .plugins import pm, get_plugins, get_model_aliases, get_models_with_aliases
import openai
import os
import pathlib
@@ -22,16 +22,6 @@

DEFAULT_MODEL = "gpt-3.5-turbo"

MODEL_ALIASES = {
    "4": "gpt-4",
    "gpt4": "gpt-4",
    "4-32k": "gpt-4-32k",
    "chatgpt": "gpt-3.5-turbo",
    "3.5": "gpt-3.5-turbo",
    "chatgpt-16k": "gpt-3.5-turbo-16k",
    "3.5-16k": "gpt-3.5-turbo-16k",
}

DEFAULT_TEMPLATE = "prompt: "


@@ -62,7 +52,7 @@ def cli():
@cli.command(name="prompt")
@click.argument("prompt", required=False)
@click.option("--system", help="System prompt to use")
@click.option("-m", "--model", help="Model to use")
@click.option("model_id", "-m", "--model", help="Model to use")
@click.option("-t", "--template", help="Template to use")
@click.option(
"-p",
@@ -92,7 +82,7 @@ def cli():
def prompt(
    prompt,
    system,
    model,
    model_id,
    template,
    param,
    no_stream,
@@ -116,6 +106,8 @@ def prompt(
        # Hang waiting for input to stdin (unless --save)
        prompt = sys.stdin.read()

    model_aliases = get_model_aliases()

    if save:
        # We are saving their prompt/system/etc to a new template
        # Fields to save: prompt, system, model - and more in the future
@@ -133,8 +125,11 @@
            )
        path = template_dir() / f"{save}.yaml"
        to_save = {}
        if model:
            to_save["model"] = MODEL_ALIASES.get(model, model)
        if model_id:
            try:
                to_save["model"] = model_aliases[model_id].model_id
            except KeyError:
                raise click.ClickException("'{}' is not a known model".format(model_id))
        if prompt:
            to_save["prompt"] = prompt
        if system:
@@ -151,8 +146,6 @@
        )
        return

    openai.api_key = get_key(key, "openai", "OPENAI_API_KEY")

    if template:
        params = dict(param)
        # Cannot be used with system
@@ -163,33 +156,55 @@
            prompt, system = template_obj.execute(prompt, params)
        except Template.MissingVariables as ex:
            raise click.ClickException(str(ex))
        if model is None and template_obj.model:
            model = template_obj.model

    messages = []
    if _continue:
        _continue = -1
        if chat_id:
            raise click.ClickException("Cannot use --continue and --chat together")
    else:
        _continue = chat_id
    chat_id, history = get_history(_continue)
        if model_id is None and template_obj.model:
            model_id = template_obj.model

    history_model = None
    if history:
        for entry in history:
            if entry.get("system"):
                messages.append({"role": "system", "content": entry["system"]})
            messages.append({"role": "user", "content": entry["prompt"]})
            messages.append({"role": "assistant", "content": entry["response"]})
            history_model = entry["model"]
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": prompt})
    if model is None:
        model = history_model or DEFAULT_MODEL
    # TODO: Re-introduce --continue mode
    # messages = []
    # if _continue:
    #     _continue = -1
    #     if chat_id:
    #         raise click.ClickException("Cannot use --continue and --chat together")
    # else:
    #     _continue = chat_id
    # chat_id, history = get_history(_continue)
    # history_model = None
    # if history:
    #     for entry in history:
    #         if entry.get("system"):
    #             messages.append({"role": "system", "content": entry["system"]})
    #         messages.append({"role": "user", "content": entry["prompt"]})
    #         messages.append({"role": "assistant", "content": entry["response"]})
    #         history_model = entry["model"]

    # Figure out which model we are using
    if model_id is None:
        model_id = history_model or DEFAULT_MODEL

    # Now resolve the model
    try:
        model = model_aliases[model_id]
    except KeyError:
        raise click.ClickException("'{}' is not a known model".format(model_id))

    # TODO: Only do this for OpenAI models
    openai.api_key = get_key(key, "openai", "OPENAI_API_KEY")

    if no_stream:
        chunk = list(model.prompt(prompt, system, stream=False))[0]
        print(chunk)
    else:
        # Resolve model aliases
        model = MODEL_ALIASES.get(model, model)
        for chunk in model.prompt(prompt, system):
            print(chunk, end="")
            sys.stdout.flush()
        print("")

    # TODO: Figure out OpenAI exception handling
    # TODO: Log to database

    return
    # Original code:
    try:
        debug = {}
        if no_stream:
@@ -330,6 +345,22 @@ def logs_list(count, path, truncate):
    click.echo(json.dumps(list(rows), indent=2))


@cli.group()
def models():
    "Manage available models"


@models.command(name="list")
def models_list():
    "List available models"
    for model_with_aliases in get_models_with_aliases():
        extra = ""
        if model_with_aliases.aliases:
            extra = " (aliases: {})".format(", ".join(model_with_aliases.aliases))
        output = str(model_with_aliases.model) + extra
        click.echo(output)
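
Given the four Chat registrations in llm/openai_models.py below, this command should print something like the following (reconstructed from Chat.__str__() and the alias tuples, not captured from a live run):

$ llm models list
OpenAI Chat: gpt-3.5-turbo (aliases: 3.5, chatgpt)
OpenAI Chat: gpt-3.5-turbo-16k (aliases: chatgpt-16k, 3.5-16k)
OpenAI Chat: gpt-4 (aliases: 4, gpt4)
OpenAI Chat: gpt-4-32k (aliases: 4-32k)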


@cli.group()
def templates():
"Manage stored prompt templates"
5 changes: 5 additions & 0 deletions llm/hookspecs.py
@@ -8,3 +8,8 @@
@hookspec
def register_commands(cli):
    """Register additional CLI commands, e.g. 'llm mycommand ...'"""


@hookspec
def register_models(register):
    "Register model instances representing LLM models that can be called"
80 changes: 80 additions & 0 deletions llm/models.py
@@ -0,0 +1,80 @@
from dataclasses import dataclass
from typing import Any, Dict, Generator, Optional, Set
from abc import ABC, abstractmethod
from pydantic import BaseModel


@dataclass
class Prompt:
    prompt: str
    model: "Model"
    system: Optional[str]
    prompt_json: Optional[str]
    options: Dict[str, Any]

    def __init__(self, prompt, model, system=None, prompt_json=None, options=None):
        self.prompt = prompt
        self.model = model
        self.system = system
        self.prompt_json = prompt_json
        self.options = options or {}


class OptionsError(Exception):
    pass


class Response(ABC):
    def __init__(self, prompt: Prompt):
        self.prompt = prompt
        self._chunks = []
        self._debug = {}
        self._done = False

    def __iter__(self):
        if self._done:
            # Replay chunks cached by a previous iteration
            yield from self._chunks
            return
        for chunk in self.iter_prompt():
            yield chunk
            self._chunks.append(chunk)
        self._done = True

    @abstractmethod
    def iter_prompt(self) -> Generator[str, None, None]:
        pass

    def _force(self):
        if not self._done:
            list(self)

    def text(self):
        self._force()
        return "".join(self._chunks)


class Model(ABC):
    model_id: str

    class Options(BaseModel):
        class Config:
            extra = "forbid"

    def prompt(self, prompt, system=None, stream=True, **options):
        return self.execute(
            Prompt(prompt, system=system, model=self, options=self.Options(**options)),
            stream=stream,
        )

    @abstractmethod
    def execute(self, prompt: Prompt, stream: bool = True) -> Response:
        pass

    @abstractmethod
    def __str__(self) -> str:
        pass


@dataclass
class ModelWithAliases:
    model: Model
    aliases: Set[str]
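
A self-contained sketch of the contract these classes define: iter_prompt() produces chunks, __iter__() caches them as they stream, and text() forces the iteration and joins the result. FakeResponse is a stand-in written just for this example:

from llm.models import Prompt, Response


class FakeResponse(Response):
    def iter_prompt(self):
        # Stand-in for a real model call
        yield from ("hello ", "world")


response = FakeResponse(Prompt("hi", model=None))
print(list(response))   # ['hello ', 'world'] - runs iter_prompt() once
print(response.text())  # 'hello world' - joins the cached chunks

Note that Model.Options sets extra = "forbid", so an unrecognized keyword option passed to Model.prompt() raises a pydantic validation error instead of being silently ignored.
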
58 changes: 58 additions & 0 deletions llm/openai_models.py
@@ -0,0 +1,58 @@
from . import Model, Prompt, OptionsError, Response, hookimpl
from typing import Optional
import openai


@hookimpl
def register_models(register):
    register(Chat("gpt-3.5-turbo"), aliases=("3.5", "chatgpt"))
    register(Chat("gpt-3.5-turbo-16k"), aliases=("chatgpt-16k", "3.5-16k"))
    register(Chat("gpt-4"), aliases=("4", "gpt4"))
    register(Chat("gpt-4-32k"), aliases=("4-32k",))


class ChatResponse(Response):
    def __init__(self, prompt, stream):
        self.prompt = prompt
        self.stream = stream
        super().__init__(prompt)

    def iter_prompt(self):
        messages = []
        if self.prompt.system:
            messages.append({"role": "system", "content": self.prompt.system})
        messages.append({"role": "user", "content": self.prompt.prompt})
        if self.stream:
            for chunk in openai.ChatCompletion.create(
                model=self.prompt.model.model_id,
                messages=messages,
                stream=True,
            ):
                self._debug["model"] = chunk.model
                content = chunk["choices"][0].get("delta", {}).get("content")
                if content is not None:
                    yield content
            self._done = True
        else:
            response = openai.ChatCompletion.create(
                model=self.prompt.model.model_id,
                messages=messages,
                stream=False,
            )
            self._debug["model"] = response.model
            self._debug["usage"] = response.usage
            content = response.choices[0].message.content
            self._done = True
            yield content


class Chat(Model):
    def __init__(self, model_id, stream=True):
        self.model_id = model_id
        self.stream = stream

    def execute(self, prompt: Prompt, stream: bool = True) -> ChatResponse:
        return ChatResponse(prompt, stream)

    def __str__(self):
        return "OpenAI Chat: {}".format(self.model_id)
28 changes: 28 additions & 0 deletions llm/plugins.py
@@ -1,6 +1,11 @@
import importlib
import pluggy
import sys
from typing import Dict, List
from . import hookspecs
from .models import ModelWithAliases, Model

DEFAULT_PLUGINS = ("llm.openai_models",)

pm = pluggy.PluginManager("llm")
pm.add_hookspecs(hookspecs)
@@ -9,6 +14,10 @@
    # Only load plugins if not running tests
    pm.load_setuptools_entrypoints("llm")

for plugin in DEFAULT_PLUGINS:
    mod = importlib.import_module(plugin)
    pm.register(mod, plugin)


def get_plugins():
    plugins = []
@@ -24,3 +33,22 @@ def get_plugins():
plugin_info["name"] = distinfo.project_name
plugins.append(plugin_info)
return plugins


def get_models_with_aliases() -> List[ModelWithAliases]:
model_aliases = []

def register(model, aliases=None):
model_aliases.append(ModelWithAliases(model, aliases or set()))

pm.hook.register_models(register=register)
return model_aliases


def get_model_aliases() -> Dict[str, Model]:
model_aliases = {}
for model_with_aliases in get_models_with_aliases():
for alias in model_with_aliases.aliases:
model_aliases[alias] = model_with_aliases.model
model_aliases[model_with_aliases.model.model_id] = model_with_aliases.model
return model_aliases
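
The resulting dictionary maps both canonical model IDs and aliases to the same instances, so given the OpenAI registrations above something like this should hold:

from llm.plugins import get_model_aliases

aliases = get_model_aliases()
assert aliases["4"].model_id == "gpt-4"
assert aliases["chatgpt"] is aliases["gpt-3.5-turbo"]  # one shared Chat instance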
