fix(langchain): support model-specific packages
nirga committed May 6, 2024
1 parent 4949f91 commit 8e9c826
Showing 10 changed files with 652 additions and 457 deletions.
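
LangChain 0.1.x moved provider integrations out of the catch-all langchain-community package into standalone packages such as langchain-openai and langchain-anthropic, and this commit updates the instrumentation's dependencies, tests, and sample app to match. The recurring pattern across the hunks below, as an illustrative snippet rather than a file from this diff:

# Before: provider classes imported from the shared community package.
# from langchain_community.chat_models import ChatOpenAI, ChatAnthropic

# After: each provider ships its own package (langchain-openai, langchain-anthropic).
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic

# Construction is unchanged; only the import path moves.
openai_model = ChatOpenAI(model="gpt-3.5-turbo")
anthropic_model = ChatAnthropic(model="claude-2.1")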
@@ -73,6 +73,7 @@ def _handle_request(span, args, kwargs, instance):
     )
 
 
+@dont_throw
 def _handle_response(span, return_value):
     if should_send_prompts():
         for idx, generation in enumerate(return_value.generations):
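
The @dont_throw decorator added above is the repo's guard that keeps instrumentation errors from propagating into the host application. A minimal sketch of the idea, assuming a conventional logging-based implementation (the helper's actual body in this repo may differ):

import logging
from functools import wraps

logger = logging.getLogger(__name__)


def dont_throw(func):
    # Instrumentation must never crash the instrumented app: log the
    # failure at WARNING level and return None instead of raising.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            logger.warning("Failed to execute %s", func.__name__, exc_info=True)

    return wrapper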
1,026 changes: 597 additions & 429 deletions packages/opentelemetry-instrumentation-langchain/poetry.lock


@@ -39,7 +39,7 @@ pytest-sugar = "1.0.0"
 [tool.poetry.group.test.dependencies]
 langchain = "^0.1.6"
 langchain-community = ">=0.0.19,<0.0.30"
-openai = "^1.12.0"
+openai = "^1.26.0"
 pytest = "8.1.1"
 pytest-sugar = "1.0.0"
 vcrpy = "^6.0.1"
@@ -48,8 +48,11 @@ pytest-asyncio = "^0.23.5"
 opentelemetry-sdk = "^1.23.0"
 opentelemetry-instrumentation-openai = {path="../opentelemetry-instrumentation-openai", develop=true}
 text-generation = "^0.7.0"
-anthropic = "^0.21.3"
+anthropic = "^0.23.0"
 boto3 = "1.34.88"
+langchain-anthropic = "^0.1.11"
+langchain-openai = "^0.1.6"
+pydantic = "^2.7.1"
 
 [build-system]
 requires = ["poetry-core"]
@@ -1,7 +1,7 @@
 interactions:
 - request:
-    body: '{"max_tokens_to_sample": 256, "model": "claude-2", "prompt": "You are helpful
-      assistant\n\nHuman: tell me a short joke\n\nAssistant:"}'
+    body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "tell me
+      a short joke"}], "model": "claude-2.1", "system": "You are a helpful assistant"}'
     headers:
       accept:
       - application/json
@@ -12,13 +12,13 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '134'
+      - '151'
       content-type:
       - application/json
       host:
       - api.anthropic.com
       user-agent:
-      - Anthropic/Python 0.21.3
+      - Anthropic/Python 0.23.1
       x-stainless-arch:
       - arm64
       x-stainless-async:
@@ -28,43 +28,44 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 0.21.3
+      - 0.23.1
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
       - 3.12.1
     method: POST
-    uri: https://api.anthropic.com/v1/complete
+    uri: https://api.anthropic.com/v1/messages
   response:
     body:
       string: !!binary |
-        H4sIAAAAAAAAA4yPsQoCMRBEf2XdxiaKdwriNYJYWImFehbCEZM9DeSSaDbKIf67KAqWljPwHjN3
-        5DYQFqh8Eyyx8Q4FGv1tqkG2zid5s1oMl7Gc16PruOTtZrVD8YsUCOWpBSVdl0HCwahWWYLI0mlI
-        AQ4tGI5k6ylMSMkUCQx3I/DN99hcSHdQYGQfqgvJ+Ba+U6RzIqcIBTZek33NsjJp6uX97INggXu3
-        d4vUSFegQOuP1T8HHk8AAAD//wMAyuHvxvwAAAA=
+        H4sIAAAAAAAAA0yOQUvDQBSE/8o6l162YnKR7kUoRQ+iBxU8iITN5rVd3L6NeW/REPLfJUXB08DH
+        MPNNiB0cTnJorqrr/cPd5vlpd/uyPYZdFVt9rPgeFjr2tLRIxB8IFkNOC/AiUdSzwuKUO0pwCMmX
+        jtb1ZQUL0dw3Qp+FOBAcl5QsynnETYjcF200fxALXLWxyEX/o7qeLUJmJVa4t+lPROl7uTyHw+tx
+        NMHzSo03bQxjSGQWq86U3rSjiSqU9jdmS8EXIRN1JUa/8lrjQN0F5vdf0YG8ZIYDcddoGRjzDwAA
+        AP//AwA2hEUeIAEAAA==
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 86ea44843f83a25f-FCO
+      - 87fa0a2f4b0aa262-FCO
       Connection:
       - keep-alive
       Content-Encoding:
       - gzip
       Content-Type:
       - application/json
       Date:
-      - Wed, 03 Apr 2024 15:59:46 GMT
+      - Mon, 06 May 2024 15:35:23 GMT
       Server:
       - cloudflare
       Transfer-Encoding:
       - chunked
       request-id:
-      - req_01DTh7TraXq8NmQxhiz1yaBU
+      - req_012wguUscCHQRvnAiUd8QZva
       via:
       - 1.1 google
       x-cloud-trace-context:
-      - 0d9db74e18924fe257bdf146f49bd918
+      - 96a20b8ab03b5b4d4f4ab009279e6fe8
     status:
       code: 200
       message: OK
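
The re-recorded cassette reflects langchain-anthropic calling Anthropic's Messages API instead of the legacy Text Completions endpoint: the system prompt becomes a dedicated field and max_tokens_to_sample becomes max_tokens. A rough sketch of the two call shapes using the anthropic Python SDK (illustrative, not code from this diff):

from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

# Legacy Text Completions (/v1/complete): one prompt string, system text inlined.
completion = client.completions.create(
    model="claude-2",
    max_tokens_to_sample=256,
    prompt=f"You are a helpful assistant{HUMAN_PROMPT} tell me a short joke{AI_PROMPT}",
)

# Messages API (/v1/messages): structured messages plus a dedicated system field.
message = client.messages.create(
    model="claude-2.1",
    max_tokens=1024,
    system="You are a helpful assistant",
    messages=[{"role": "user", "content": "tell me a short joke"}],
)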
@@ -1,8 +1,7 @@
 import pytest
 from langchain.prompts import PromptTemplate
-from langchain_community.llms.openai import OpenAI
-from langchain import LLMChain
-from langchain.chains import SequentialChain
+from langchain.chains import SequentialChain, LLMChain
+from langchain_openai import OpenAI
 
 
 @pytest.mark.vcr
@@ -11,7 +11,9 @@
 from langchain_community.llms.huggingface_text_gen_inference import (
     HuggingFaceTextGenInference,
 )
-from langchain_community.chat_models import BedrockChat, ChatOpenAI, ChatAnthropic
+from langchain_community.chat_models import BedrockChat
+from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
 from langchain_core.pydantic_v1 import BaseModel, Field
 import boto3
 
@@ -232,7 +234,8 @@ def test_openai(exporter):
     assert openai_span.attributes["llm.request.type"] == "chat"
     assert openai_span.attributes["gen_ai.request.model"] == "gpt-3.5-turbo"
     assert (
-        openai_span.attributes["gen_ai.prompt.0.content"] == "You are a helpful assistant"
+        openai_span.attributes["gen_ai.prompt.0.content"]
+        == "You are a helpful assistant"
     )
     assert openai_span.attributes["gen_ai.prompt.0.role"] == "system"
     assert (
@@ -251,7 +254,7 @@ def test_anthropic(exporter):
     prompt = ChatPromptTemplate.from_messages(
         [("system", "You are a helpful assistant"), ("user", "{input}")]
     )
-    model = ChatAnthropic(model="claude-2")
+    model = ChatAnthropic(model="claude-2.1")
 
     chain = prompt | model
     response = chain.invoke({"input": "tell me a short joke"})
@@ -269,7 +272,7 @@ def test_anthropic(exporter):
     )
 
     assert anthropic_span.attributes["llm.request.type"] == "chat"
-    assert anthropic_span.attributes["gen_ai.request.model"] == "claude-2"
+    assert anthropic_span.attributes["gen_ai.request.model"] == "claude-2.1"
     assert (
         anthropic_span.attributes["gen_ai.prompt.0.content"]
         == "You are a helpful assistant"
@@ -1,3 +1,4 @@
+from contextlib import asynccontextmanager, contextmanager
 import json
 import logging
 import time
@@ -396,6 +397,7 @@ def _set_streaming_token_metrics(
 
 
 @dont_throw
+@contextmanager
 def _build_from_streaming_response(
     span,
     response,
@@ -464,6 +466,7 @@ def _build_from_streaming_response(
 
 
 @dont_throw
+@asynccontextmanager
 async def _abuild_from_streaming_response(
     span,
     response,
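
For readers unfamiliar with the decorators being added: contextlib.contextmanager and asynccontextmanager turn a single-yield (async) generator into a context manager, which lets callers scope the streaming span to a `with` block instead of relying on the generator being fully consumed. A stripped-down illustration of the pattern, with hypothetical names rather than this file's real code:

from contextlib import contextmanager


@contextmanager
def stream_wrapper(span, response):
    # The caller consumes the stream inside the `with` block; the span is
    # guaranteed to be ended afterwards, even if iteration raises.
    try:
        yield response
    finally:
        span.end()


# Usage sketch:
# with stream_wrapper(span, response) as stream:
#     for chunk in stream:
#         process(chunk)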
24 changes: 20 additions & 4 deletions packages/sample-app/poetry.lock


1 change: 1 addition & 0 deletions packages/sample-app/pyproject.toml
@@ -50,6 +50,7 @@ llama-index-embeddings-huggingface = "^0.2.0"
 litellm = "^1.34.42"
 text-generation = "^0.7.0"
 llama-index-vector-stores-chroma = "^0.1.6"
+langchain-openai = "^0.1.6"
 
 [tool.poetry.dependencies.opentelemetry-instrumentation-openai]
 path = "../opentelemetry-instrumentation-openai"
2 changes: 1 addition & 1 deletion packages/sample-app/sample_app/langchain_lcel.py
@@ -4,7 +4,7 @@
 from langchain_community.utils.openai_functions import (
     convert_pydantic_to_openai_function,
 )
-from langchain_community.chat_models import ChatOpenAI
+from langchain_openai import ChatOpenAI
 from langchain_core.pydantic_v1 import BaseModel, Field
 
 
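
The sample app keeps its LCEL function-calling pipeline; only the ChatOpenAI import moves. A hedged sketch of how the new import slots into such a chain (the Joke schema and chain wiring here are illustrative guesses, not copied from langchain_lcel.py):

from langchain_community.utils.openai_functions import (
    convert_pydantic_to_openai_function,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI  # was: from langchain_community.chat_models


class Joke(BaseModel):
    """A short joke, returned as a structured function-call payload."""

    setup: str = Field(description="question that sets up the joke")
    punchline: str = Field(description="answer that resolves the joke")


prompt = ChatPromptTemplate.from_messages([("user", "{input}")])
# Bind the pydantic-derived function schema so the model replies via function call.
model = ChatOpenAI(model="gpt-3.5-turbo").bind(
    functions=[convert_pydantic_to_openai_function(Joke)]
)
chain = prompt | model
# result = chain.invoke({"input": "tell me a short joke"})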
