Skip to content

Commit

Permalink
feat: Switch the llm to ChatVertexAI
Browse files Browse the repository at this point in the history
Also use a patched version of the langchain-google-vertexai module to address an issue that occurs when using LangGraph with ChatVertexAI.
  • Loading branch information
anubhav756 committed Nov 15, 2024
1 parent 5ed833e commit b066cb7
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 5 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
from langchain.tools import StructuredTool
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_google_vertexai import VertexAI
from langchain_google_vertexai import ChatVertexAI
from pytz import timezone

from ..orchestrator import BaseOrchestrator, classproperty
Expand Down Expand Up @@ -69,7 +69,7 @@ def initialize_agent(
prompt: ChatPromptTemplate,
model: str,
) -> "UserAgent":
llm = VertexAI(max_output_tokens=512, model_name=model, temperature=0.0)
llm = ChatVertexAI(max_output_tokens=512, model_name=model, temperature=0.0)
memory = ConversationBufferMemory(
chat_memory=ChatMessageHistory(messages=history),
memory_key="chat_history",
Expand Down
4 changes: 2 additions & 2 deletions llm_demo/orchestrator/langgraph/react_graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
)
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, RunnableLambda
from langchain_google_vertexai import VertexAI
from langchain_google_vertexai import ChatVertexAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
Expand Down Expand Up @@ -85,7 +85,7 @@ async def create_graph(
tool_node = ToolNode(tools)

# model node
model = VertexAI(max_output_tokens=512, model_name=model_name, temperature=0.0)
model = ChatVertexAI(max_output_tokens=512, model_name=model_name, temperature=0.0)

# Add the prompt to the model to create a model runnable
model_runnable = prompt | model
Expand Down
2 changes: 1 addition & 1 deletion llm_demo/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ itsdangerous==2.2.0
jinja2==3.1.4
langchain-community==0.3.2
langchain==0.3.3
langchain-google-vertexai==2.0.4
langchain-google-vertexai==2.0.7
markdown==3.7
types-Markdown==3.7.0.20240822
uvicorn[standard]==0.31.0
Expand Down

0 comments on commit b066cb7

Please sign in to comment.