Skip to content

Commit

Permalink
Merge branch 'main' into dr_log10feedback_documentation
Browse files Browse the repository at this point in the history
  • Loading branch information
nqn authored Apr 11, 2024
2 parents e6f0de3 + 7699da8 commit 3476051
Show file tree
Hide file tree
Showing 14 changed files with 762 additions and 107 deletions.
23 changes: 23 additions & 0 deletions examples/logging/litellm_async_stream_completion.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import asyncio

import litellm

from log10.litellm import Log10LitellmLogger


# Register the log10 callback so litellm forwards completion events to log10.
log10_handler = Log10LitellmLogger(tags=["litellm_acompletion"])
litellm.callbacks = [log10_handler]

model_name = "claude-3-haiku-20240307"


async def completion():
    """Stream an async chat completion and print each text delta as it arrives."""
    stream = await litellm.acompletion(
        model=model_name,
        messages=[{"role": "user", "content": "count to 10"}],
        stream=True,
    )
    async for part in stream:
        delta = part.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)


asyncio.run(completion())
13 changes: 13 additions & 0 deletions examples/logging/litellm_completion_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import litellm

from log10.litellm import Log10LitellmLogger


# Register the log10 callback so litellm forwards completion events to log10.
log10_handler = Log10LitellmLogger(tags=["litellm_completion", "stream"])
litellm.callbacks = [log10_handler]

stream = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Count to 10."}],
    stream=True,
)
for part in stream:
    delta = part.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
34 changes: 34 additions & 0 deletions examples/logging/litellm_image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import base64

import httpx
import litellm

from log10.litellm import Log10LitellmLogger


# Register the log10 callback so litellm forwards completion events to log10.
log10_handler = Log10LitellmLogger(tags=["litellm_image"])
litellm.callbacks = [log10_handler]

# Fetch a sample image and inline it as a base64 data URL for the vision model.
image_url = "https://upload.wikimedia.org/wikipedia/commons/e/e8/Log10.png"
image_media_type = "image/png"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")


# Other vision-capable models (e.g. "gpt-4-vision-preview") also work here.
# NOTE: removed a stray `model_name = ("gpt-4-vision-preview",)` tuple
# assignment that was dead code — it was immediately overwritten below.
model_name = "claude-3-haiku-20240307"
resp = litellm.completion(
    model=model_name,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:{image_media_type};base64,{image_data}"},
                },
                {"type": "text", "text": "What's the red curve in the figure, is it log2 or log10? Be concise."},
            ],
        }
    ],
)
print(resp.choices[0].message.content)
42 changes: 42 additions & 0 deletions examples/logging/litellm_image_async_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
import asyncio
import base64

import httpx
import litellm

from log10.litellm import Log10LitellmLogger


# Register the log10 callback so litellm forwards completion events to log10.
log10_handler = Log10LitellmLogger(tags=["litellm_image", "stream", "async"])
litellm.callbacks = [log10_handler]

# Fetch a sample image and inline it as a base64 data URL for the vision model.
image_url = "https://upload.wikimedia.org/wikipedia/commons/e/e8/Log10.png"
image_media_type = "image/png"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

model_name = "claude-3-haiku-20240307"


async def completion():
    """Stream an async vision completion and print each text delta as it arrives.

    Fix: the original called the synchronous `litellm.completion` and iterated
    with a plain `for` inside an async function, blocking the event loop; use
    `await litellm.acompletion(...)` with `async for`, matching the other async
    streaming example.
    """
    resp = await litellm.acompletion(
        model=model_name,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:{image_media_type};base64,{image_data}"},
                    },
                    {"type": "text", "text": "What's the red curve in the figure, is it log2 or log10? Be concise."},
                ],
            }
        ],
        stream=True,
    )
    async for chunk in resp:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)


asyncio.run(completion())
35 changes: 35 additions & 0 deletions examples/logging/litellm_image_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import base64

import httpx
import litellm

from log10.litellm import Log10LitellmLogger


# Register the log10 callback so litellm forwards completion events to log10.
log10_handler = Log10LitellmLogger(tags=["litellm_image", "stream"])
litellm.callbacks = [log10_handler]

# Fetch a sample image and inline it as a base64 data URL for the vision model.
image_url = "https://upload.wikimedia.org/wikipedia/commons/e/e8/Log10.png"
image_media_type = "image/png"
image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

model_name = "claude-3-haiku-20240307"

image_part = {
    "type": "image_url",
    "image_url": {"url": f"data:{image_media_type};base64,{image_data}"},
}
text_part = {
    "type": "text",
    "text": "What's the red curve in the figure, is it log2 or log10? Be concise.",
}
resp = litellm.completion(
    model=model_name,
    messages=[{"role": "user", "content": [image_part, text_part]}],
    stream=True,
)
for chunk in resp:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
24 changes: 24 additions & 0 deletions examples/logging/mistralai_chat_no_streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import mistralai
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

from log10.load import log10


# Patch the mistralai module so calls are logged to log10.
log10(mistralai)


def main():
    """Send one non-streaming chat request and print the model's reply."""
    client = MistralClient()
    chat_response = client.chat(
        model="mistral-tiny",
        messages=[ChatMessage(role="user", content="10 + 2 * 3=?")],
    )
    print(chat_response.choices[0].message.content)


if __name__ == "__main__":
    main()
27 changes: 27 additions & 0 deletions examples/logging/mistralai_chat_with_streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import mistralai
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

from log10.load import log10


# Patch the mistralai module so calls are logged to log10.
log10(mistralai)


def main():
    """Stream a chat completion and print each text delta as it arrives."""
    model = "mistral-tiny"

    client = MistralClient()

    response = client.chat_stream(
        model=model,
        messages=[ChatMessage(role="user", content="count the odd numbers from 1 to 20.")],
    )
    # Removed a leftover commented-out ipdb breakpoint (debug residue).
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            print(chunk.choices[0].delta.content, end="")


if __name__ == "__main__":
    main()
40 changes: 29 additions & 11 deletions log10/_httpx_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import logging
import time
import traceback
import uuid
from datetime import datetime, timezone

import httpx
Expand All @@ -17,6 +18,7 @@
_log10_config = Log10Config()
base_url = _log10_config.url
httpx_client = httpx.Client()
httpx_async_client = httpx.AsyncClient()


def _get_time_diff(created_at):
Expand Down Expand Up @@ -83,19 +85,35 @@ def _try_post_request(url: str, payload: dict = {}) -> httpx.Response:
logger.error(f"Failed to insert in log10: {payload} with error {err}")


async def _try_post_request_async(url: str, payload: dict = None) -> httpx.Response:
    """POST `payload` to `url` with log10 auth headers via the shared async client.

    Returns the `httpx.Response` on success. On failure the error is logged and
    the function implicitly returns None (best-effort logging must not raise).

    Fixes vs. original:
    - `payload: dict = {}` was a mutable default that the body mutated
      (`payload["organization_id"] = ...`), leaking state across calls.
    - The 401 check substring-matched `str(http_err)`; compare the actual
      response status code instead.
    """
    payload = {} if payload is None else payload
    headers = {
        "x-log10-token": _log10_config.token,
        "x-log10-organization-id": _log10_config.org_id,
        "Content-Type": "application/json",
    }
    payload["organization_id"] = _log10_config.org_id
    try:
        res = await httpx_async_client.post(url, headers=headers, json=payload)
        res.raise_for_status()
        return res
    except httpx.HTTPStatusError as http_err:
        if http_err.response.status_code == 401:
            logger.error(
                "Failed authorization. Please verify that LOG10_TOKEN and LOG10_ORG_ID are set correctly and try again."
                + "\nSee https://github.com/log10-io/log10#%EF%B8%8F-setup for details"
            )
        else:
            logger.error(f"Failed with error: {http_err}")
    except Exception as err:
        logger.error(f"Failed to insert in log10: {payload} with error {err}")


async def get_completion_id(request: Request):
    """Tag an outgoing chat-completion request with a fresh log10 completion id.

    Only `v1/chat/completions` requests are supported; anything else is logged
    and left untagged. The id is generated locally with `uuid4` — the displayed
    span still contained the removed server-round-trip code (POST to
    `/api/completions` to obtain a `completionID`), which was dead since this
    assignment unconditionally overwrote the header; that residue is dropped.
    """
    if "v1/chat/completions" not in str(request.url):
        logger.warning("Currently logging is only available for v1/chat/completions.")
        return

    request.headers["x-log10-completion-id"] = str(uuid.uuid4())


async def log_request(request: Request):
Expand Down Expand Up @@ -125,7 +143,7 @@ async def log_request(request: Request):
}
if get_log10_session_tags():
log_row["tags"] = get_log10_session_tags()
_try_post_request(url=f"{base_url}/api/completions/{completion_id}", payload=log_row)
await _try_post_request_async(url=f"{base_url}/api/completions/{completion_id}", payload=log_row)


class _LogResponse(Response):
Expand Down Expand Up @@ -205,7 +223,7 @@ async def aiter_bytes(self, *args, **kwargs):
}
if get_log10_session_tags():
log_row["tags"] = get_log10_session_tags()
_try_post_request(url=f"{base_url}/api/completions/{completion_id}", payload=log_row)
await _try_post_request_async(url=f"{base_url}/api/completions/{completion_id}", payload=log_row)


class _LogTransport(httpx.AsyncBaseTransport):
Expand Down Expand Up @@ -246,7 +264,7 @@ async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
}
if get_log10_session_tags():
log_row["tags"] = get_log10_session_tags()
_try_post_request(url=f"{base_url}/api/completions/{completion_id}", payload=log_row)
await _try_post_request_async(url=f"{base_url}/api/completions/{completion_id}", payload=log_row)
return response
elif response.headers.get("content-type") == "text/event-stream":
return _LogResponse(
Expand Down
2 changes: 1 addition & 1 deletion log10/feedback/feedback.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def create(
res = self._post_request(self.feedback_create_url, json_payload)
return res

def list(self, offset: int = 0, limit: int = 50, task_id: str = None) -> httpx.Response:
def list(self, offset: int = 0, limit: int = 50, task_id: str = "") -> httpx.Response:
base_url = self._log10_config.url
api_url = "/api/v1/feedback"
url = f"{base_url}{api_url}?organization_id={self._log10_config.org_id}&offset={offset}&limit={limit}&task_id={task_id}"
Expand Down
Loading

0 comments on commit 3476051

Please sign in to comment.