Skip to content

Commit

Permalink
[docs]: purge message graph (#1330)
Browse files Browse the repository at this point in the history
* purge message graph

* docstrings

* use with dicts
  • Loading branch information
isahers1 authored Aug 14, 2024
1 parent a3dd43b commit 34fd833
Show file tree
Hide file tree
Showing 7 changed files with 1,050 additions and 74 deletions.
170 changes: 158 additions & 12 deletions examples/chatbot-simulation-evaluation/agent-simulation-evaluation.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,35 @@
"id": "0d30b6f7-3bec-4d9f-af50-43dfdc81ae6c",
"metadata": {},
"outputs": [],
"source": ["# %%capture --no-stderr\n# %pip install -U langgraph langchain langchain_openai"]
"source": [
"# %%capture --no-stderr\n",
"# %pip install -U langgraph langchain langchain_openai"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "30c2f3de-c730-4aec-85a6-af2c2f058803",
"metadata": {},
"outputs": [],
"source": ["import getpass\nimport os\n\n\ndef _set_if_undefined(var: str):\n if not os.environ.get(var):\n os.environ[var] = getpass.getpass(f\"Please provide your {var}\")\n\n\n_set_if_undefined(\"OPENAI_API_KEY\")\n_set_if_undefined(\"LANGCHAIN_API_KEY\")\n\n# Optional, add tracing in LangSmith.\n# This will help you visualize and debug the control flow\nos.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\nos.environ[\"LANGCHAIN_PROJECT\"] = \"Agent Simulation Evaluation\""]
"source": [
"import getpass\n",
"import os\n",
"\n",
"\n",
"def _set_if_undefined(var: str):\n",
"    \"\"\"Prompt for env var `var` via getpass and set it, unless already set.\"\"\"\n",
"    if not os.environ.get(var):\n",
"        os.environ[var] = getpass.getpass(f\"Please provide your {var}\")\n",
"\n",
"\n",
"_set_if_undefined(\"OPENAI_API_KEY\")\n",
"_set_if_undefined(\"LANGCHAIN_API_KEY\")\n",
"\n",
"# Optional, add tracing in LangSmith.\n",
"# This will help you visualize and debug the control flow\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"os.environ[\"LANGCHAIN_PROJECT\"] = \"Agent Simulation Evaluation\""
]
},
{
"cell_type": "markdown",
Expand All @@ -55,7 +75,24 @@
"id": "828479af-cf9c-4888-a365-599643a96b55",
"metadata": {},
"outputs": [],
"source": ["from typing import List\n\nimport openai\n\n\n# This is flexible, but you can define your agent here, or call your agent API here.\ndef my_chat_bot(messages: List[dict]) -> dict:\n system_message = {\n \"role\": \"system\",\n \"content\": \"You are a customer support agent for an airline.\",\n }\n messages = [system_message] + messages\n completion = openai.chat.completions.create(\n messages=messages, model=\"gpt-3.5-turbo\"\n )\n return completion.choices[0].message.model_dump()"]
"source": [
"from typing import List\n",
"\n",
"import openai\n",
"\n",
"\n",
"# This is flexible, but you can define your agent here, or call your agent API here.\n",
"def my_chat_bot(messages: List[dict]) -> dict:\n",
"    \"\"\"Run the airline support agent on an OpenAI-format message list.\n",
"\n",
"    Args:\n",
"        messages: Conversation so far, as OpenAI chat dicts (role/content).\n",
"\n",
"    Returns:\n",
"        The assistant's reply message as a plain dict (via ``model_dump()``).\n",
"    \"\"\"\n",
"    system_message = {\n",
"        \"role\": \"system\",\n",
"        \"content\": \"You are a customer support agent for an airline.\",\n",
"    }\n",
"    messages = [system_message] + messages\n",
"    completion = openai.chat.completions.create(\n",
"        messages=messages, model=\"gpt-3.5-turbo\"\n",
"    )\n",
"    return completion.choices[0].message.model_dump()"
]
},
{
"cell_type": "code",
Expand All @@ -77,7 +114,9 @@
"output_type": "execute_result"
}
],
"source": ["my_chat_bot([{\"role\": \"user\", \"content\": \"hi!\"}])"]
"source": [
"my_chat_bot([{\"role\": \"user\", \"content\": \"hi!\"}])"
]
},
{
"cell_type": "markdown",
Expand All @@ -96,7 +135,33 @@
"id": "32c147df-7f90-4b0d-9a6b-671677020353",
"metadata": {},
"outputs": [],
"source": ["from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\nfrom langchain_openai import ChatOpenAI\n\nsystem_prompt_template = \"\"\"You are a customer of an airline company. \\\nYou are interacting with a user who is a customer support person. \\\n\n{instructions}\n\nWhen you are finished with the conversation, respond with a single word 'FINISHED'\"\"\"\n\nprompt = ChatPromptTemplate.from_messages(\n [\n (\"system\", system_prompt_template),\n MessagesPlaceholder(variable_name=\"messages\"),\n ]\n)\ninstructions = \"\"\"Your name is Harrison. You are trying to get a refund for the trip you took to Alaska. \\\nYou want them to give you ALL the money back. \\\nThis trip happened 5 years ago.\"\"\"\n\nprompt = prompt.partial(name=\"Harrison\", instructions=instructions)\n\nmodel = ChatOpenAI()\n\nsimulated_user = prompt | model"]
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"system_prompt_template = \"\"\"You are a customer of an airline company. \\\n",
"You are interacting with a user who is a customer support person. \\\n",
"\n",
"{instructions}\n",
"\n",
"When you are finished with the conversation, respond with a single word 'FINISHED'\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
"    [\n",
"        (\"system\", system_prompt_template),\n",
"        MessagesPlaceholder(variable_name=\"messages\"),\n",
"    ]\n",
")\n",
"instructions = \"\"\"Your name is Harrison. You are trying to get a refund for the trip you took to Alaska. \\\n",
"You want them to give you ALL the money back. \\\n",
"This trip happened 5 years ago.\"\"\"\n",
"\n",
"# NOTE(review): the template above has no {name} placeholder, so the `name`\n",
"# partial below looks vestigial — confirm it is accepted/ignored here.\n",
"prompt = prompt.partial(name=\"Harrison\", instructions=instructions)\n",
"\n",
"model = ChatOpenAI()\n",
"\n",
"simulated_user = prompt | model"
]
},
{
"cell_type": "code",
Expand All @@ -115,7 +180,12 @@
"output_type": "execute_result"
}
],
"source": ["from langchain_core.messages import HumanMessage\n\nmessages = [HumanMessage(content=\"Hi! How can I help you?\")]\nsimulated_user.invoke({\"messages\": messages})"]
"source": [
"from langchain_core.messages import HumanMessage\n",
"\n",
"messages = [HumanMessage(content=\"Hi! How can I help you?\")]\n",
"simulated_user.invoke({\"messages\": messages})"
]
},
{
"cell_type": "markdown",
Expand Down Expand Up @@ -153,7 +223,20 @@
"id": "69e2a3a3-40f3-4223-9136-113738440be9",
"metadata": {},
"outputs": [],
"source": ["from langchain_community.adapters.openai import convert_message_to_dict\nfrom langchain_core.messages import AIMessage\n\n\ndef chat_bot_node(messages):\n # Convert from LangChain format to the OpenAI format, which our chatbot function expects.\n messages = [convert_message_to_dict(m) for m in messages]\n # Call the chat bot\n chat_bot_response = my_chat_bot(messages)\n # Respond with an AI Message\n return AIMessage(content=chat_bot_response[\"content\"])"]
"source": [
"from langchain_community.adapters.openai import convert_message_to_dict\n",
"from langchain_core.messages import AIMessage\n",
"\n",
"\n",
"def chat_bot_node(state):\n",
"    \"\"\"Graph node: run the chat bot on the conversation so far.\n",
"\n",
"    Reads ``state[\"messages\"]``, calls ``my_chat_bot``, and returns an\n",
"    update dict whose messages ``add_messages`` appends to the state.\n",
"    \"\"\"\n",
"    messages = state[\"messages\"]\n",
"    # Convert from LangChain format to the OpenAI format, which our chatbot function expects.\n",
"    messages = [convert_message_to_dict(m) for m in messages]\n",
"    # Call the chat bot\n",
"    chat_bot_response = my_chat_bot(messages)\n",
"    # Respond with an AI Message\n",
"    return {\"messages\": [AIMessage(content=chat_bot_response[\"content\"])]}"
]
},
{
"cell_type": "markdown",
Expand All @@ -169,7 +252,26 @@
"id": "7cad7527-ffa5-4c30-8585-b54a7a18bd98",
"metadata": {},
"outputs": [],
"source": ["def _swap_roles(messages):\n new_messages = []\n for m in messages:\n if isinstance(m, AIMessage):\n new_messages.append(HumanMessage(content=m.content))\n else:\n new_messages.append(AIMessage(content=m.content))\n return new_messages\n\n\ndef simulated_user_node(messages):\n # Swap roles of messages\n new_messages = _swap_roles(messages)\n # Call the simulated user\n response = simulated_user.invoke({\"messages\": new_messages})\n # This response is an AI message - we need to flip this to be a human message\n return HumanMessage(content=response.content)"]
"source": [
"def _swap_roles(messages):\n",
"    \"\"\"Flip AI messages to human and vice versa, so the simulated user\n",
"    sees the chat bot's turns as incoming human messages.\"\"\"\n",
"    new_messages = []\n",
"    for m in messages:\n",
"        if isinstance(m, AIMessage):\n",
"            new_messages.append(HumanMessage(content=m.content))\n",
"        else:\n",
"            new_messages.append(AIMessage(content=m.content))\n",
"    return new_messages\n",
"\n",
"\n",
"def simulated_user_node(state):\n",
"    \"\"\"Graph node: generate the simulated user's next turn.\n",
"\n",
"    Returns an update dict; ``add_messages`` appends the new message.\n",
"    \"\"\"\n",
"    messages = state[\"messages\"]\n",
"    # Swap roles of messages\n",
"    new_messages = _swap_roles(messages)\n",
"    # Call the simulated user\n",
"    response = simulated_user.invoke({\"messages\": new_messages})\n",
"    # This response is an AI message - we need to flip this to be a human message\n",
"    return {\"messages\": [HumanMessage(content=response.content)]}"
]
},
{
"cell_type": "markdown",
Expand All @@ -192,7 +294,16 @@
"id": "28004fbf-a2f3-46b7-bde7-46c7adaf97fb",
"metadata": {},
"outputs": [],
"source": ["def should_continue(messages):\n if len(messages) > 6:\n return \"end\"\n elif messages[-1].content == \"FINISHED\":\n return \"end\"\n else:\n return \"continue\""]
"source": [
"def should_continue(state):\n",
"    \"\"\"Route the simulation: \\\"end\\\" once the conversation is long enough\n",
"    or the simulated user has said FINISHED; otherwise \\\"continue\\\".\"\"\"\n",
"    messages = state[\"messages\"]\n",
"    if len(messages) > 6 or messages[-1].content == \"FINISHED\":\n",
"        return \"end\"\n",
"    return \"continue\""
]
},
{
"cell_type": "markdown",
Expand All @@ -210,7 +321,36 @@
"id": "0b597e4b-4cbb-4bbc-82e5-f7e31275964c",
"metadata": {},
"outputs": [],
"source": ["from langgraph.graph import END, MessageGraph, START\n\ngraph_builder = MessageGraph()\ngraph_builder.add_node(\"user\", simulated_user_node)\ngraph_builder.add_node(\"chat_bot\", chat_bot_node)\n# Every response from your chat bot will automatically go to the\n# simulated user\ngraph_builder.add_edge(\"chat_bot\", \"user\")\ngraph_builder.add_conditional_edges(\n \"user\",\n should_continue,\n # If the finish criteria are met, we will stop the simulation,\n # otherwise, the virtual user's message will be sent to your chat bot\n {\n \"end\": END,\n \"continue\": \"chat_bot\",\n },\n)\n# The input will first go to your chat bot\ngraph_builder.add_edge(START, \"chat_bot\")\nsimulation = graph_builder.compile()"]
"source": [
"from typing import Annotated\n",
"\n",
"from typing_extensions import TypedDict\n",
"\n",
"from langgraph.graph import END, START, StateGraph\n",
"from langgraph.graph.message import add_messages\n",
"\n",
"\n",
"class State(TypedDict):\n",
"    # `add_messages` appends each node's returned messages to this list\n",
"    messages: Annotated[list, add_messages]\n",
"\n",
"\n",
"graph_builder = StateGraph(State)\n",
"graph_builder.add_node(\"user\", simulated_user_node)\n",
"graph_builder.add_node(\"chat_bot\", chat_bot_node)\n",
"# Every response from your chat bot will automatically go to the\n",
"# simulated user\n",
"graph_builder.add_edge(\"chat_bot\", \"user\")\n",
"graph_builder.add_conditional_edges(\n",
"    \"user\",\n",
"    should_continue,\n",
"    # If the finish criteria are met, we will stop the simulation,\n",
"    # otherwise, the virtual user's message will be sent to your chat bot\n",
"    {\n",
"        \"end\": END,\n",
"        \"continue\": \"chat_bot\",\n",
"    },\n",
")\n",
"# The input will first go to your chat bot\n",
"graph_builder.add_edge(START, \"chat_bot\")\n",
"simulation = graph_builder.compile()"
]
},
{
"cell_type": "markdown",
Expand Down Expand Up @@ -251,15 +391,21 @@
]
}
],
"source": ["for chunk in simulation.stream([]):\n # Print out all events aside from the final end chunk\n if END not in chunk:\n print(chunk)\n print(\"----\")"]
"source": [
"for chunk in simulation.stream({}):\n",
"    # Print out all events aside from the final end chunk\n",
"    # NOTE(review): with StateGraph update-streaming, chunk keys are node\n",
"    # names, so this END guard looks vestigial — confirm before removing.\n",
"    if END not in chunk:\n",
"        print(chunk)\n",
"        print(\"----\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dde4f2b5-cfe8-4ff0-99ea-fe2c5fed70c0",
"metadata": {},
"outputs": [],
"source": [""]
"source": []
}
],
"metadata": {
Expand Down
10 changes: 8 additions & 2 deletions examples/chatbots/information-gather-prompting.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -177,10 +177,16 @@
"outputs": [],
"source": [
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import START, MessageGraph\n",
"from langgraph.graph import StateGraph, START\n",
"from langgraph.graph.message import add_messages\n",
"from typing import Annotated\n",
"from typing_extensions import TypedDict\n",
"\n",
"class State(TypedDict):\n",
" messages: Annotated[list, add_messages]\n",
"\n",
"memory = MemorySaver()\n",
"workflow = MessageGraph()\n",
"workflow = StateGraph(State)\n",
"workflow.add_node(\"info\", chain)\n",
"workflow.add_node(\"prompt\", prompt_gen_chain)\n",
"\n",
Expand Down
Loading

0 comments on commit 34fd833

Please sign in to comment.