Allow using custom LLM providers in chat APIs #25

Merged · 4 commits · Jul 5, 2024
Changes from all commits
3 changes: 2 additions & 1 deletion .env.example
@@ -1,3 +1,4 @@
QSTASH_TOKEN="YOUR_TOKEN"
QSTASH_CURRENT_SIGNING_KEY="<YOUR_CURRENT_SIGNING_KEY>"
QSTASH_NEXT_SIGNING_KEY="<YOUR_NEXT_SIGNING_KEY>"
OPENAI_API_KEY="<YOUR_OPENAI_API_KEY>"
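
For local development, these values can be loaded with `python-dotenv` (which the test suite below also uses) before constructing the client. A minimal sketch, assuming the values are saved in a `.env` file at the project root:

```python
import os

import dotenv
from upstash_qstash import QStash

# Populate os.environ from .env; a no-op if the file is absent.
dotenv.load_dotenv()

qstash = QStash(os.environ["QSTASH_TOKEN"])
```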
3 changes: 2 additions & 1 deletion .github/workflows/tests.yml
@@ -47,4 +47,5 @@ jobs:
export QSTASH_TOKEN="${{ secrets.QSTASH_TOKEN }}"
export QSTASH_CURRENT_SIGNING_KEY="${{ secrets.QSTASH_CURRENT_SIGNING_KEY }}"
export QSTASH_NEXT_SIGNING_KEY="${{ secrets.QSTASH_NEXT_SIGNING_KEY }}"
export OPENAI_API_KEY="${{ secrets.OPENAI_API_KEY }}"
poetry run pytest
76 changes: 75 additions & 1 deletion README.md
@@ -3,7 +3,7 @@
> [!NOTE]
> **This project is in GA Stage.**
>
> The Upstash Professional Support fully covers this project. It receives regular updates and bug fixes.
> The Upstash team is committed to maintaining and improving its functionality.

**QStash** is an HTTP based messaging and scheduling solution for serverless and edge runtimes.
@@ -73,11 +73,13 @@ receiver.verify(

```python
from upstash_qstash import QStash
from upstash_qstash.chat import upstash

qstash = QStash("<QSTASH_TOKEN>")

res = qstash.chat.create(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    provider=upstash(),
    messages=[
        {
            "role": "user",
            "content": "What is the capital of Turkey?",
        }
    ],
)

print(res.choices[0].message.content)
```
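
Note that `provider` is optional for the Upstash-hosted models shown above; the pre-existing tests in this PR still call `chat.create` without it, so omitting it keeps the default Upstash provider.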

#### Create Chat Completions Using Custom Providers

```python
from upstash_qstash import QStash
from upstash_qstash.chat import openai

qstash = QStash("<QSTASH_TOKEN>")

res = qstash.chat.create(
    model="gpt-3.5-turbo",
    provider=openai("<OPENAI_API_KEY>"),
    messages=[
        {
            "role": "user",
            "content": "What is the capital of Turkey?",
        }
    ],
)

print(res.choices[0].message.content)
```
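
Streaming works with custom providers as well. The following is a minimal sketch modeled on this PR's async tests; it assumes the synchronous client accepts the same `stream=True` flag and yields OpenAI-style `delta` chunks:

```python
from upstash_qstash import QStash
from upstash_qstash.chat import openai

qstash = QStash("<QSTASH_TOKEN>")

stream = qstash.chat.create(
    model="gpt-3.5-turbo",
    provider=openai("<OPENAI_API_KEY>"),
    messages=[{"role": "user", "content": "What is the capital of Turkey?"}],
    stream=True,
)

# The first chunk carries the role; later chunks carry content, and the
# final chunk may carry only a finish_reason.
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```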

#### Publish a JSON message to LLM

```python
from upstash_qstash import QStash
from upstash_qstash.chat import upstash

qstash = QStash("<QSTASH_TOKEN>")

res = qstash.message.publish_json(
    api={"name": "llm", "provider": upstash()},
    body={
        "model": "meta-llama/Meta-Llama-3-8B-Instruct",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of Turkey?",
            }
        ],
    },
    callback="https://example-cb.com",
)

print(res.message_id)
```
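
Unlike `chat.create`, `publish_json` returns as soon as the message is enqueued; the completion is delivered later via an HTTP POST to the `callback` URL. Below is a sketch of a receiving endpoint, assuming the documented QStash callback shape in which the upstream response body arrives base64-encoded in a `body` field (Flask and the route path are illustrative and not part of this SDK):

```python
import base64
import json

from flask import Flask, request

app = Flask(__name__)


@app.post("/qstash-callback")
def qstash_callback():
    payload = request.get_json()
    # The callback wraps the LLM response; its body is base64-encoded JSON.
    completion = json.loads(base64.b64decode(payload["body"]))
    print(completion["choices"][0]["message"]["content"])
    return "", 200
```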

#### Publish a JSON message to LLM Using Custom Providers

```python
from upstash_qstash import QStash
from upstash_qstash.chat import openai

qstash = QStash("<QSTASH_TOKEN>")

res = qstash.message.publish_json(
    api={"name": "llm", "provider": openai("<OPENAI_API_KEY>")},
    body={
        "model": "gpt-3.5-turbo",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of Turkey?",
            }
        ],
    },
    callback="https://example-cb.com",
)

print(res.message_id)
```

#### Additional configuration

3 changes: 2 additions & 1 deletion examples/llm.py
@@ -6,6 +6,7 @@
"""

from upstash_qstash import QStash
from upstash_qstash.chat import upstash


def main():
@@ -14,7 +15,7 @@ def main():
    )

    qstash.message.publish_json(
        api={"name": "llm", "provider": upstash()},
        body={
            "model": "meta-llama/Meta-Llama-3-8B-Instruct",
            "messages": [
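
The same publish call also works from the async client exercised by the tests below. A sketch, assuming `AsyncQStash.message.publish_json` mirrors the synchronous signature:

```python
import asyncio

from upstash_qstash import AsyncQStash
from upstash_qstash.chat import upstash


async def main() -> None:
    qstash = AsyncQStash("<QSTASH_TOKEN>")
    res = await qstash.message.publish_json(
        api={"name": "llm", "provider": upstash()},
        body={
            "model": "meta-llama/Meta-Llama-3-8B-Instruct",
            "messages": [{"role": "user", "content": "hello"}],
        },
        callback="https://example-cb.com",
    )
    print(res.message_id)


asyncio.run(main())
```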
5 changes: 5 additions & 0 deletions tests/__init__.py
@@ -20,6 +20,11 @@
    dotenv.dotenv_values().get("QSTASH_NEXT_SIGNING_KEY"),
)

OPENAI_API_KEY = os.environ.get(
"OPENAI_API_KEY",
dotenv.dotenv_values().get("OPENAI_API_KEY"),
)


def assert_eventually(
    assertion: Callable[[], None],
166 changes: 161 additions & 5 deletions tests/asyncio/test_chat.py
@@ -1,14 +1,16 @@
import pytest

from tests import OPENAI_API_KEY
from upstash_qstash import AsyncQStash
from upstash_qstash.asyncio.chat import AsyncChatCompletionChunkStream
from upstash_qstash.chat import ChatCompletion, upstash, openai


@pytest.mark.asyncio
async def test_chat_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        messages=[{"role": "user", "content": "just say hello"}],
    )

    assert isinstance(res, ChatCompletion)
@@ -21,7 +23,7 @@ async def test_chat_async(async_qstash: AsyncQStash) -> None:
async def test_chat_streaming_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        messages=[{"role": "user", "content": "just say hello"}],
        stream=True,
    )

@@ -41,7 +43,7 @@ async def test_chat_streaming_async(async_qstash: AsyncQStash) -> None:
async def test_prompt_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.prompt(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        user="just say hello",
    )

    assert isinstance(res, ChatCompletion)
@@ -54,7 +56,44 @@ async def test_prompt_async(async_qstash: AsyncQStash) -> None:
async def test_prompt_streaming_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.prompt(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        user="just say hello",
        stream=True,
    )

    assert isinstance(res, AsyncChatCompletionChunkStream)

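    # The first chunk of a stream carries the role; later chunks carry content.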
    i = 0
    async for r in res:
        if i == 0:
            assert r.choices[0].delta.role is not None
        else:
            assert r.choices[0].delta.content is not None

        i += 1


@pytest.mark.asyncio
async def test_chat_explicit_upstash_provider_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        messages=[{"role": "user", "content": "just say hello"}],
        provider=upstash(),
    )

    assert isinstance(res, ChatCompletion)

    assert len(res.choices[0].message.content) > 0
    assert res.choices[0].message.role == "assistant"


@pytest.mark.asyncio
async def test_chat_explicit_upstash_provider_streaming_async(
    async_qstash: AsyncQStash,
) -> None:
    res = await async_qstash.chat.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        messages=[{"role": "user", "content": "just say hello"}],
        provider=upstash(),
        stream=True,
    )

@@ -68,3 +107,120 @@ async def test_prompt_streaming_async(async_qstash: AsyncQStash) -> None:
            assert r.choices[0].delta.content is not None

        i += 1


@pytest.mark.asyncio
async def test_prompt_explicit_upstash_provider_async(
    async_qstash: AsyncQStash,
) -> None:
    res = await async_qstash.chat.prompt(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        user="just say hello",
        provider=upstash(),
    )

    assert isinstance(res, ChatCompletion)

    assert len(res.choices[0].message.content) > 0
    assert res.choices[0].message.role == "assistant"


@pytest.mark.asyncio
async def test_prompt_explicit_upstash_provider_streaming_async(
    async_qstash: AsyncQStash,
) -> None:
    res = await async_qstash.chat.prompt(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        user="just say hello",
        provider=upstash(),
        stream=True,
    )

    assert isinstance(res, AsyncChatCompletionChunkStream)

    i = 0
    async for r in res:
        if i == 0:
            assert r.choices[0].delta.role is not None
        else:
            assert r.choices[0].delta.content is not None

        i += 1


@pytest.mark.asyncio
async def test_chat_custom_provider_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "just say hello"}],
        provider=openai(token=OPENAI_API_KEY),  # type:ignore[arg-type]
    )

    assert isinstance(res, ChatCompletion)

    assert len(res.choices[0].message.content) > 0
    assert res.choices[0].message.role == "assistant"


@pytest.mark.asyncio
async def test_chat_custom_provider_streaming_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "just say hello"}],
        provider=openai(token=OPENAI_API_KEY),  # type:ignore[arg-type]
        stream=True,
    )

    assert isinstance(res, AsyncChatCompletionChunkStream)

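    # OpenAI-style streams end with a chunk whose delta is empty and whose
    # finish_reason is set, so later chunks carry content or a finish_reason.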
    i = 0
    async for r in res:
        if i == 0:
            assert r.choices[0].delta.role is not None
        else:
            assert (
                r.choices[0].delta.content is not None
                or r.choices[0].finish_reason is not None
            )

        i += 1


@pytest.mark.asyncio
async def test_prompt_custom_provider_async(async_qstash: AsyncQStash) -> None:
    res = await async_qstash.chat.prompt(
        model="gpt-3.5-turbo",
        user="just say hello",
        provider=openai(token=OPENAI_API_KEY),  # type:ignore[arg-type]
    )

    assert isinstance(res, ChatCompletion)

    assert len(res.choices[0].message.content) > 0
    assert res.choices[0].message.role == "assistant"


@pytest.mark.asyncio
async def test_prompt_custom_provider_streaming_async(
    async_qstash: AsyncQStash,
) -> None:
    res = await async_qstash.chat.prompt(
        model="gpt-3.5-turbo",
        user="just say hello",
        provider=openai(token=OPENAI_API_KEY),  # type:ignore[arg-type]
        stream=True,
    )

    assert isinstance(res, AsyncChatCompletionChunkStream)

    i = 0
    async for r in res:
        if i == 0:
            assert r.choices[0].delta.role is not None
        else:
            assert (
                r.choices[0].delta.content is not None
                or r.choices[0].finish_reason is not None
            )

        i += 1