diff --git a/docs/_scripts/copy_notebooks.py b/docs/_scripts/copy_notebooks.py
index e50b1eab5..382edbb96 100644
--- a/docs/_scripts/copy_notebooks.py
+++ b/docs/_scripts/copy_notebooks.py
@@ -30,6 +30,7 @@
     "input_output_schema.ipynb",
     "pass_private_state.ipynb",
     "memory/manage-conversation-history.ipynb",
+    "memory/shared-state.ipynb",
     "subgraphs-manage-state.ipynb",
     "subgraph-transform-state.ipynb",
     "memory/delete-messages.ipynb",
diff --git a/docs/docs/cloud/how-tos/index.md b/docs/docs/cloud/how-tos/index.md
index f1bf22398..b75cae2dc 100644
--- a/docs/docs/cloud/how-tos/index.md
+++ b/docs/docs/cloud/how-tos/index.md
@@ -75,3 +75,4 @@ Other guides that may prove helpful!
 - [How to integrate webhooks](./webhooks.md)
 - [How to copy threads](./copy_threads.md)
 - [How to check status of your threads](./check_thread_status.md)
+- [How to share state between threads](./shared_state.md)
\ No newline at end of file
diff --git a/docs/docs/cloud/how-tos/shared_state.md b/docs/docs/cloud/how-tos/shared_state.md
new file mode 100644
index 000000000..77b1b3df9
--- /dev/null
+++ b/docs/docs/cloud/how-tos/shared_state.md
@@ -0,0 +1,492 @@
+# How to share state between threads

By default, state in a graph is scoped to a specific thread. LangGraph also allows you to specify a "scope" for a given key/value pair, so that it is shared between threads. This can be useful for storing information that should be available across threads. For instance, you may want to store information about a user's preferences expressed in one thread, and then use that information in another thread.

In this guide we will go through an example of how to use a graph that has been deployed with shared state.

## Setup

First, make sure that you have a deployed graph that has a shared state key. Your state definition should look something like this (support for shared state channels in JS is coming soon!):

```python
from typing import Annotated, TypedDict

from langgraph.managed.shared_value import SharedValue


class AgentState(TypedDict):
    # This is scoped to a user_id, so it will be information specific to each user
    info: Annotated[dict, SharedValue.on("user_id")]
    # ... other state keys ...
```

!!! note "Typing shared state keys"
note "Typing shared state keys" + Shared state channels (keys) MUST be dictionaries (see `info` channel in the AgentState example above) + +Now we can setup our client and an initial thread to run the graph on: + +=== "Python" + + ```python + from langgraph_sdk import get_client + + client = get_client(url=) + # Using the graph deployed with the name "agent" + assistant_id = "agent"; + # create thread + thread = await client.threads.create() + ``` + +=== "Javascript" + + ```js + import { Client } from "@langchain/langgraph-sdk"; + + const client = new Client({ apiUrl: }); + // Using the graph deployed with the name "agent" + const assistantID = "agent"; + // create thread + let thread = await client.threads.create(); + ``` + +=== "CURL" + + ```bash + curl --request POST \ + --url /threads \ + --header 'Content-Type: application/json' \ + --data '{}' + ``` + +## Usage + +Now, let's run the graph on the first thread, and provide it some information about the users preferences: + +=== "Python" + + ```python + input = {"messages": [{"role": "human", "content": "i like pepperoni pizza"}]} + config = {"configurable": {"user_id": "123"}} + # stream values + async for chunk in client.runs.stream( + thread["thread_id"], + assistant_id, + input=input, + config=config, + ): + print(f"Receiving new event of type: {chunk.event}...") + print(chunk.data) + print("\n\n") + ``` + +=== "Javascript" + + ```js + // create input + let input = { + messages: [ + { + role: "human", + content: "i like pepperoni pizza", + } + ] + }; + let config = { configurable: { user_id: "123" } }; + + const streamResponse = client.runs.stream( + thread["thread_id"], + assistantID, + { + input, + config + } + ); + for await (const chunk of streamResponse) { + console.log(`Receiving new event of type: ${chunk.event}...`); + console.log(chunk.data); + console.log("\n\n"); + } + ``` + +=== "CURL" + + ```bash + curl --request POST \ + --url /threads//runs/stream \ + --header 'Content-Type: application/json' \ + --data "{ + \"assistant_id\": \"agent\", + \"input\": {\"messages\": [{\"role\": \"human\", \"content\": \"i like pepperoni pizza\"}]}, + \"config\":{\"configurable\":{\"user_id\":\"123\"}} + }" | \ + sed 's/\r$//' | \ + awk ' + /^event:/ { + if (data_content != "") { + print data_content "\n" + } + sub(/^event: /, "Receiving event of type: ", $0) + printf "%s...\n", $0 + data_content = "" + } + /^data:/ { + sub(/^data: /, "", $0) + data_content = $0 + } + END { + if (data_content != "") { + print data_content "\n" + } + } + ' + ``` + +Output: + + Receiving new event of type: metadata... + {'run_id': '1ef6bdb2-ba0e-6177-84a9-c574772223b3'} + + + + Receiving new event of type: values... + {'messages': [{'content': 'i like pepperoni pizza', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1244b8b-e54e-4ebe-ada4-63aadf4a7701', 'example': False}]} + + + + Receiving new event of type: values... 
+ {'messages': [{'content': 'i like pepperoni pizza', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1244b8b-e54e-4ebe-ada4-63aadf4a7701', 'example': False}, {'content': '', 'additional_kwargs': {'tool_calls': [{'index': 0, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'function': {'arguments': '{"fact":"Isaac likes pepperoni pizza","topic":"Food"}', 'name': 'Info'}, 'type': 'function'}]}, 'response_metadata': {'finish_reason': 'tool_calls', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-f086646f-cb38-4419-9a92-fc7cb19340ee', 'example': False, 'tool_calls': [{'name': 'Info', 'args': {'fact': 'Isaac likes pepperoni pizza', 'topic': 'Food'}, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'type': 'tool_call'}], 'invalid_tool_calls': [], 'usage_metadata': None}]} + + + + Receiving new event of type: values... + {'messages': [{'content': 'i like pepperoni pizza', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1244b8b-e54e-4ebe-ada4-63aadf4a7701', 'example': False}, {'content': '', 'additional_kwargs': {'tool_calls': [{'index': 0, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'function': {'arguments': '{"fact":"Isaac likes pepperoni pizza","topic":"Food"}', 'name': 'Info'}, 'type': 'function'}]}, 'response_metadata': {'finish_reason': 'tool_calls', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-f086646f-cb38-4419-9a92-fc7cb19340ee', 'example': False, 'tool_calls': [{'name': 'Info', 'args': {'fact': 'Isaac likes pepperoni pizza', 'topic': 'Food'}, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'type': 'tool_call'}], 'invalid_tool_calls': [], 'usage_metadata': None}, {'content': 'Saved!', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'tool', 'name': None, 'id': 'bed77d11-916c-4bad-b8f8-f0c850a8e494', 'tool_call_id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'artifact': None, 'status': 'success'}]} + + + +Let's stay on the same thread and provide some additional information. Note that we are not redefining the config since we want to continue the conversation on the same thread with the same user. 
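Before continuing, you can optionally sanity-check what has been persisted on this thread by fetching its current state. The snippet below is a minimal sketch using the SDK's `threads.get_state` method; note that the shared `info` channel is scoped to the `user_id` rather than to the thread, so depending on the deployment it may be stored separately from the per-thread values:

```python
# A minimal sketch: fetch the state of the current thread.
# The per-thread "messages" channel will be present here; the shared "info"
# channel is keyed by user_id, so it may not appear in per-thread state.
state = await client.threads.get_state(thread["thread_id"])
print(state["values"].keys())
```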
+ +=== "Python" + + ```python + input = {"messages": [{"role": "human", "content": "i also just moved to SF"}]} + # stream values + async for chunk in client.runs.stream( + thread["thread_id"], + assistant_id, # the graph name + input=input, + config=config, + ): + print(f"Receiving new event of type: {chunk.event}...") + print(chunk.data) + print("\n\n") + ``` + +=== "Javascript" + + ```js + input = { + messages: [ + { + role: "human", + content: "i also just moved to SF", + } + ] + }; + + const streamResponse = client.runs.stream( + thread["thread_id"], + assistantID, + { + input, + config + } + ); + + for await (const chunk of streamResponse) { + console.log(`Receiving new event of type: ${chunk.event}...`); + console.log(chunk.data); + console.log("\n\n"); + } + ``` + +=== "CURL" + + ```bash + curl --request POST \ + --url /threads//runs/stream \ + --header 'Content-Type: application/json' \ + --data "{ + \"assistant_id\": \"agent\", + \"input\": {\"messages\": [{\"role\": \"human\", \"content\": \"i also just moved to SF\"}]}, + \"config\":{\"configurable\":{\"user_id\":\"123\"}} + }" | \ + sed 's/\r$//' | \ + awk ' + /^event:/ { + if (data_content != "") { + print data_content "\n" + } + sub(/^event: /, "Receiving event of type: ", $0) + printf "%s...\n", $0 + data_content = "" + } + /^data:/ { + sub(/^data: /, "", $0) + data_content = $0 + } + END { + if (data_content != "") { + print data_content "\n" + } + } + ' + ``` + +Output: + + Receiving new event of type: metadata... + {'run_id': '1ef6bdb2-f068-60b6-93a6-b2e2f02f117d'} + + + + Receiving new event of type: values... + {'messages': [{'content': 'i like pepperoni pizza', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1244b8b-e54e-4ebe-ada4-63aadf4a7701', 'example': False}, {'content': '', 'additional_kwargs': {'tool_calls': [{'index': 0, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'function': {'arguments': '{"fact":"Isaac likes pepperoni pizza","topic":"Food"}', 'name': 'Info'}, 'type': 'function'}]}, 'response_metadata': {'finish_reason': 'tool_calls', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-f086646f-cb38-4419-9a92-fc7cb19340ee', 'example': False, 'tool_calls': [{'name': 'Info', 'args': {'fact': 'Isaac likes pepperoni pizza', 'topic': 'Food'}, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'type': 'tool_call'}], 'invalid_tool_calls': [], 'usage_metadata': None}, {'content': 'Saved!', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'tool', 'name': None, 'id': 'bed77d11-916c-4bad-b8f8-f0c850a8e494', 'tool_call_id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'artifact': None, 'status': 'success'}, {'content': 'i also just moved to SF', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1e98940-d9fc-454c-bb65-0036e2c048c6', 'example': False}]} + + + + Receiving new event of type: values... 
+ {'messages': [{'content': 'i like pepperoni pizza', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1244b8b-e54e-4ebe-ada4-63aadf4a7701', 'example': False}, {'content': '', 'additional_kwargs': {'tool_calls': [{'index': 0, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'function': {'arguments': '{"fact":"Isaac likes pepperoni pizza","topic":"Food"}', 'name': 'Info'}, 'type': 'function'}]}, 'response_metadata': {'finish_reason': 'tool_calls', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-f086646f-cb38-4419-9a92-fc7cb19340ee', 'example': False, 'tool_calls': [{'name': 'Info', 'args': {'fact': 'Isaac likes pepperoni pizza', 'topic': 'Food'}, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'type': 'tool_call'}], 'invalid_tool_calls': [], 'usage_metadata': None}, {'content': 'Saved!', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'tool', 'name': None, 'id': 'bed77d11-916c-4bad-b8f8-f0c850a8e494', 'tool_call_id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'artifact': None, 'status': 'success'}, {'content': 'i also just moved to SF', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1e98940-d9fc-454c-bb65-0036e2c048c6', 'example': False}, {'content': '', 'additional_kwargs': {'tool_calls': [{'index': 0, 'id': 'call_yPNnY10h9KszyuVf5p6H2c1E', 'function': {'arguments': '{"fact":"Isaac just moved to SF","topic":"Location"}', 'name': 'Info'}, 'type': 'function'}]}, 'response_metadata': {'finish_reason': 'tool_calls', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-d247f67f-ba1a-4ce7-84c2-0f30180d10c6', 'example': False, 'tool_calls': [{'name': 'Info', 'args': {'fact': 'Isaac just moved to SF', 'topic': 'Location'}, 'id': 'call_yPNnY10h9KszyuVf5p6H2c1E', 'type': 'tool_call'}], 'invalid_tool_calls': [], 'usage_metadata': None}]} + + + + Receiving new event of type: values... 
    {'messages': [{'content': 'i like pepperoni pizza', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1244b8b-e54e-4ebe-ada4-63aadf4a7701', 'example': False}, {'content': '', 'additional_kwargs': {'tool_calls': [{'index': 0, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'function': {'arguments': '{"fact":"Isaac likes pepperoni pizza","topic":"Food"}', 'name': 'Info'}, 'type': 'function'}]}, 'response_metadata': {'finish_reason': 'tool_calls', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-f086646f-cb38-4419-9a92-fc7cb19340ee', 'example': False, 'tool_calls': [{'name': 'Info', 'args': {'fact': 'Isaac likes pepperoni pizza', 'topic': 'Food'}, 'id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'type': 'tool_call'}], 'invalid_tool_calls': [], 'usage_metadata': None}, {'content': 'Saved!', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'tool', 'name': None, 'id': 'bed77d11-916c-4bad-b8f8-f0c850a8e494', 'tool_call_id': 'call_ujnk8CIx0xeguFHHe8P0ecgm', 'artifact': None, 'status': 'success'}, {'content': 'i also just moved to SF', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'f1e98940-d9fc-454c-bb65-0036e2c048c6', 'example': False}, {'content': '', 'additional_kwargs': {'tool_calls': [{'index': 0, 'id': 'call_yPNnY10h9KszyuVf5p6H2c1E', 'function': {'arguments': '{"fact":"Isaac just moved to SF","topic":"Location"}', 'name': 'Info'}, 'type': 'function'}]}, 'response_metadata': {'finish_reason': 'tool_calls', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-d247f67f-ba1a-4ce7-84c2-0f30180d10c6', 'example': False, 'tool_calls': [{'name': 'Info', 'args': {'fact': 'Isaac just moved to SF', 'topic': 'Location'}, 'id': 'call_yPNnY10h9KszyuVf5p6H2c1E', 'type': 'tool_call'}], 'invalid_tool_calls': [], 'usage_metadata': None}, {'content': 'Saved!', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'tool', 'name': None, 'id': '7c4b82b0-5ee5-4d97-902b-dbec1499fe39', 'tool_call_id': 'call_yPNnY10h9KszyuVf5p6H2c1E', 'artifact': None, 'status': 'success'}]}



Now, let's run the graph on a completely different thread and see that it remembers the information we provided:

=== "Python"

    ```python
    # new thread for new conversation
    thread = await client.threads.create()
    input = {"messages": [{"role": "human", "content": "where and what should i eat for dinner? Can you list some restaurants?"}]}
    # stream values
    async for chunk in client.runs.stream(
        thread["thread_id"],
        assistant_id,
        input=input,
        config=config,
    ):
        print(f"Receiving new event of type: {chunk.event}...")
        print(chunk.data)
        print("\n\n")
    ```

=== "Javascript"

    ```js
    // new thread for new conversation
    thread = await client.threads.create();

    // create input
    let input = {
      messages: [
        {
          role: "human",
          content: "where and what should i eat for dinner? Can you list some restaurants?",
        }
      ]
    };

    const streamResponse = client.runs.stream(
      thread["thread_id"],
      assistantID,
      {
        input,
        config
      }
    );
    for await (const chunk of streamResponse) {
      console.log(`Receiving new event of type: ${chunk.event}...`);
      console.log(chunk.data);
      console.log("\n\n");
    }
    ```

=== "CURL"

    ```bash
    curl --request POST \
    --url <DEPLOYMENT_URL>/threads \
    --header 'Content-Type: application/json' \
    --data '{}' \
    | jq -r '.thread_id' \
    | xargs -I {} \
    curl --request POST \
    --url <DEPLOYMENT_URL>/threads/{}/runs/stream \
    --header 'Content-Type: application/json' \
    --data '{
      "assistant_id": "agent",
      "input": {
        "messages": [{
          "role": "human",
          "content": "where and what should i eat for dinner? Can you list some restaurants?"
        }]
      },
      "config": {
        "configurable": {
          "user_id": "123"
        }
      }
    }' \
    | sed 's/\r$//' \
    | awk '
    /^event:/ {
        if (data_content != "") {
            print data_content "\n"
        }
        sub(/^event: /, "Receiving event of type: ", $0)
        printf "%s...\n", $0
        data_content = ""
    }
    /^data:/ {
        sub(/^data: /, "", $0)
        data_content = $0
    }
    END {
        if (data_content != "") {
            print data_content "\n"
        }
    }
    '
    ```

Output:

    Receiving new event of type: metadata...
    {'run_id': '1ef6bde9-d866-623c-8647-a56e33322334'}



    Receiving new event of type: values...
    {'messages': [{'content': 'where and what should i eat for dinner? Can you list some restaurants?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'aaf07830-ddbf-4ec6-b520-2371490abaa8', 'example': False}]}



    Receiving new event of type: values...
    {'messages': [{'content': 'where and what should i eat for dinner? Can you list some restaurants?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'aaf07830-ddbf-4ec6-b520-2371490abaa8', 'example': False}, {'content': "Sure! Since you just moved to SF, I can suggest some popular restaurants in the area. Here are a few options:\n\n1. Tony's Pizza Napoletana - Known for their delicious pizzas, including pepperoni pizza.\n2. The House - Offers Asian fusion cuisine in a cozy setting.\n3. Tadich Grill - A historic seafood restaurant serving classic dishes.\n4. Swan Oyster Depot - A seafood counter known for its fresh seafood selections.\n5. Zuni Cafe - A popular spot for American and Mediterranean-inspired dishes.\n\nDo any of these options sound good to you? Let me know if you need more recommendations or information about any specific cuisine!", 'additional_kwargs': {}, 'response_metadata': {'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-dbac2e4c-0e4b-4c4d-b17f-172456222f53', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]}



Perfect! The AI recommended restaurants in SF, and included a pizza restaurant at the top of its list.

Let's now run the graph for another user to verify that the preferences of the first user are self-contained:

=== "Python"

    ```python
    # new thread for new conversation
    thread = await client.threads.create()
    # create input
    input = {"messages": [{"role": "human", "content": "where do I live? what do I like to eat?"}]}
what do I like to eat?"}]} + config = {"configurable": {"user_id": "321"}} + # stream values + async for chunk in client.runs.stream( + thread["thread_id"], + assistant_id, + input=input, + config=config, + ): + print(f"Receiving new event of type: {chunk.event}...") + print(chunk.data) + print("\n\n") + ``` + +=== "Javascript" + + ```js + // new thread for new conversation + thread = await client.threads.create(); + // create input + let input = { + messages: [ + { + role: "human", + content: "where do I live? what do I like to eat?", + } + ] + }; + let config = { configurable: { user_id: "321" } }; + + const streamResponse = client.runs.stream( + thread["thread_id"], + assistantID, + { + input, + config + } + ); + for await (const chunk of streamResponse) { + console.log(`Receiving new event of type: ${chunk.event}...`); + console.log(chunk.data); + console.log("\n\n"); + } + ``` + +=== "CURL" + + ```bash + curl --request POST \ + --url /threads \ + --header 'Content-Type: application/json' \ + --data '{}' \ + | jq -r '.thread_id' \ + | xargs -I {} \ + curl --request POST \ + --url /threads/{}/runs/stream \ + --header 'Content-Type: application/json' \ + --data "{ + \"assistant_id\": \"agent\", + \"input\": {\"messages\": [{\"role\": \"human\", \"content\": \"where do I live? what do I like to eat?\"}]}, + \"config\":{\"configurable\":{\"user_id\":\"321\"}} + }" | \ + sed 's/\r$//' | \ + awk ' + /^event:/ { + if (data_content != "") { + print data_content "\n" + } + sub(/^event: /, "Receiving event of type: ", $0) + printf "%s...\n", $0 + data_content = "" + } + /^data:/ { + sub(/^data: /, "", $0) + data_content = $0 + } + END { + if (data_content != "") { + print data_content "\n" + } + } + ' + ``` + +Output: + + Receiving new event of type: metadata... + {'run_id': '1ef6bdf3-6aae-63ab-adc4-0a1467251531'} + + + + Receiving new event of type: values... + {'messages': [{'content': 'where do I live? what do I like to eat?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': '043fd6a6-b59b-411b-9ec3-f6947260e6d3', 'example': False}]} + + + + Receiving new event of type: values... + {'messages': [{'content': 'where do I live? what do I like to eat?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': '043fd6a6-b59b-411b-9ec3-f6947260e6d3', 'example': False}, {'content': "I don't have that information yet. Can you please provide me with details about where you live and what you like to eat?", 'additional_kwargs': {}, 'response_metadata': {'finish_reason': 'stop', 'model_name': 'gpt-3.5-turbo-0125'}, 'type': 'ai', 'name': None, 'id': 'run-4dd3415d-e75b-44d5-9744-14f840e6c696', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]} + +Perfect! The agent does not have access to the first users preferences as we expect! + diff --git a/docs/docs/how-tos/index.md b/docs/docs/how-tos/index.md index c0a717c4d..171294b3d 100644 --- a/docs/docs/how-tos/index.md +++ b/docs/docs/how-tos/index.md @@ -27,6 +27,7 @@ LangGraph makes it easy to persist state across graph runs. 
 - [How to use Postgres checkpointer for persistence](persistence_postgres.ipynb)
 - [How to create a custom checkpointer using MongoDB](persistence_mongodb.ipynb)
 - [How to create a custom checkpointer using Redis](persistence_redis.ipynb)
+- [How to share state between threads](memory/shared-state.ipynb)
 
 ## Human in the Loop
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index be5f11513..9291a309f 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -137,6 +137,7 @@ nav:
       - Use Postgres checkpointer for persistence: how-tos/persistence_postgres.ipynb
       - Create custom checkpointer using MongoDB: how-tos/persistence_mongodb.ipynb
       - Create custom checkpointer using Redis: how-tos/persistence_redis.ipynb
+      - Share state between threads: how-tos/memory/shared-state.ipynb
     - Human-in-the-loop:
       - Add breakpoints: how-tos/human_in_the_loop/breakpoints.ipynb
       - Add dynamic breakpoints: how-tos/human_in_the_loop/dynamic_breakpoints.ipynb
@@ -239,6 +240,7 @@
         - Integrate Webhooks: 'cloud/how-tos/webhooks.md'
         - Copy Threads: 'cloud/how-tos/copy_threads.md'
         - Check Status of Threads: "cloud/how-tos/check_thread_status.md"
+        - Share State Between Threads: "cloud/how-tos/shared_state.md"
     - Conceptual Guides:
         - API Concepts: "cloud/concepts/api.md"
         - Cloud Concepts: "cloud/concepts/cloud.md"
diff --git a/examples/memory/shared-state.ipynb b/examples/memory/shared-state.ipynb
new file mode 100644
index 000000000..ae236b77b
--- /dev/null
+++ b/examples/memory/shared-state.ipynb
@@ -0,0 +1,285 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "7240d5b5-9dac-4070-8a9e-2350fb01e0be",
   "metadata": {},
   "source": [
    "# How to share state between threads\n",
    "\n",
    "By default, state in a graph is scoped to a specific thread.\n",
    "LangGraph also allows you to specify a \"scope\" for a given key/value pair, so that it is shared between threads. This can be useful for storing information that should be available across threads. For instance, you may want to store information about a user's preferences expressed in one thread, and then use that information in another thread.\n",
    "\n",
    "In this notebook we will go through an example of how to construct and use such a graph."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4c550b5-1954-496b-8b9d-800361af17dc",
   "metadata": {},
   "source": [
    "## Create graph\n",
    "\n",
    "In this example we will create a graph that will let us store information about a user's preferences. We will do so by defining a state key that will be scoped to a user_id, and allowing the model to populate this field as it deems fit (by providing the model with a tool to save information about the user).\n",
    "\n",
\n", + "

Typing shared state keys

\n", + "

\n", + " Shared state channels (keys) MUST be dictionaries (see `info` channel in the AgentState example below)\n", + "

\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a7f303d6-612e-4e34-bf36-29d4ed25d802", + "metadata": {}, + "outputs": [], + "source": [ + "from langgraph.graph.graph import START, END\n", + "from langgraph.graph.message import MessagesState\n", + "from langgraph.graph.state import StateGraph\n", + "from langgraph.store.memory import MemoryStore\n", + "from langgraph.managed.shared_value import SharedValue\n", + "from typing import TypedDict, Annotated, Any\n", + "import uuid\n", + "from langchain_openai import ChatOpenAI\n", + "from langgraph.checkpoint.memory import MemorySaver\n", + "\n", + "\n", + "class AgentState(MessagesState):\n", + " # We use an info key to track information\n", + " # This is scoped to a user_id, so it will be information specific to each user\n", + " info: Annotated[dict, SharedValue.on(\"user_id\")]\n", + "\n", + "\n", + "# We will give this as a tool to the agent\n", + "# This will let the agent call this tool to save a fact\n", + "class Info(TypedDict):\n", + " \"\"\"This tool should be called when you want to save a new fact about the user.\n", + " \n", + " Attributes:\n", + " fact (str): A fact about the user.\n", + " topic (str): The topic related the fact is about, i.e. Food, Location, Movies, etc.\n", + " \"\"\"\n", + " fact: str\n", + " topic: str\n", + "\n", + "\n", + "# This is the prompt we give the agent\n", + "# We will pass known info into the prompt\n", + "# We will tell it to use the Info tool to save more\n", + "prompt = \"\"\"You are helpful assistant.\n", + "\n", + "Here is what you know about the user:\n", + "\n", + "\n", + "{info}\n", + "\n", + "\n", + "Help out the user. If the user tells you any information about themselves, save the information using the `Info` tool.\n", + "\n", + "This means if the user provides any sort of fact about themselves, be it an opinion they have, a fact about themselves, etc. 
SAVE IT!\n", + "\"\"\"\n", + "\n", + "\n", + "# We give the model access to the Info tool\n", + "model = ChatOpenAI().bind_tools([Info])\n", + "\n", + "\n", + "# Our first node - this will call the model\n", + "def call_model(state):\n", + " # We get all facts and assemble them into a string\n", + " facts = [d['fact'] for d in state['info'].values()]\n", + " info = \"\\n\".join(facts)\n", + " # Format system prompt\n", + " system_msg = prompt.format(info=info)\n", + " # Call model\n", + " response = model.invoke([{\"role\": \"system\", \"content\": system_msg}] + state['messages'])\n", + " return {\"messages\": [response]}\n", + "\n", + "\n", + "# Routing function to decide what to do next\n", + "# If no tool calls, then we end\n", + "# If tool calls, then we update memory\n", + "def route(state):\n", + " if len(state['messages'][-1].tool_calls) == 0:\n", + " return END\n", + " else:\n", + " return \"update_memory\"\n", + "\n", + "\n", + "# This function is responsible for updating the memory\n", + "def update_memory(state):\n", + " tool_calls = []\n", + " memories = {}\n", + " # Each tool call is a new memory to save\n", + " for tc in state['messages'][-1].tool_calls:\n", + " # We append ToolMessages (to pass back to the LLM)\n", + " # This is needed because OpenAI requires each tool call be followed by a ToolMessage\n", + " tool_calls.append({\"role\": \"tool\", \"content\": \"Saved!\", \"tool_call_id\": tc['id']})\n", + " # We create a new memory from this tool call\n", + " memories[str(uuid.uuid4())] = {\"fact\": tc['args']['fact'], \"topic\": tc['args']['topic']}\n", + " # Return the messages and memories to update the state with\n", + " return {\"messages\": tool_calls, \"info\": memories}\n", + "\n", + "\n", + "# This is the in memory checkpointer we will use\n", + "# We need this because we want to enable threads (conversations)\n", + "memory = MemorySaver()\n", + "\n", + "# This is the in memory Key Value store\n", + "# This is needed to save the memories\n", + "kv = MemoryStore()\n", + "\n", + "# Construct this relatively simple graph\n", + "graph = StateGraph(AgentState)\n", + "graph.add_node(call_model)\n", + "graph.add_node(update_memory)\n", + "graph.add_edge(\"update_memory\", END)\n", + "graph.add_edge(START, \"call_model\")\n", + "graph.add_conditional_edges(\"call_model\", route)\n", + "graph = graph.compile(checkpointer=memory, store=kv)" + ] + }, + { + "cell_type": "markdown", + "id": "552d4e33-556d-4fa5-8094-2a076bc21529", + "metadata": {}, + "source": [ + "## Run graph on one thread\n", + "\n", + "We can now run the graph on one thread and give it some information" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "18bd8679-3a73-4033-bfb4-5093ac1f5d7f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'call_model': {'messages': [AIMessage(content='Hello! 
      "{'call_model': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_zMUXZfhOCFYvZg5TwXyBzw16', 'function': {'arguments': '{\"fact\":\"I like pepperoni pizza\",\"topic\":\"Food\"}', 'name': 'Info'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 193, 'total_tokens': 214}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-7297f9fb-1d3e-480e-b125-ab269f648158-0', tool_calls=[{'name': 'Info', 'args': {'fact': 'I like pepperoni pizza', 'topic': 'Food'}, 'id': 'call_zMUXZfhOCFYvZg5TwXyBzw16', 'type': 'tool_call'}], usage_metadata={'input_tokens': 193, 'output_tokens': 21, 'total_tokens': 214})]}}\n",
      "{'update_memory': {'messages': [{'role': 'tool', 'content': 'Saved!', 'tool_call_id': 'call_zMUXZfhOCFYvZg5TwXyBzw16'}]}}\n",
      "{'call_model': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_GjshujJAeqoTuuBeHCD5YTPQ', 'function': {'arguments': '{\"fact\":\"I just moved to SF\",\"topic\":\"Location\"}', 'name': 'Info'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 21, 'prompt_tokens': 239, 'total_tokens': 260}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-4abea1d6-7ccb-49b4-b805-0e04ebb542e3-0', tool_calls=[{'name': 'Info', 'args': {'fact': 'I just moved to SF', 'topic': 'Location'}, 'id': 'call_GjshujJAeqoTuuBeHCD5YTPQ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 239, 'output_tokens': 21, 'total_tokens': 260})]}}\n",
      "{'update_memory': {'messages': [{'role': 'tool', 'content': 'Saved!', 'tool_call_id': 'call_GjshujJAeqoTuuBeHCD5YTPQ'}]}}\n"
     ]
    }
   ],
   "source": [
    "config = {\"configurable\": {\"thread_id\": \"1\", \"user_id\": \"1\"}}\n",
    "\n",
    "# First let's just say hi to the AI\n",
    "for update in graph.stream({\"messages\": [{\"role\": \"user\", \"content\": \"hi\"}]}, config, stream_mode=\"updates\"):\n",
    "    print(update)\n",
    "\n",
    "# Let's continue the conversation (by passing the same config) and tell the AI we like pepperoni pizza\n",
    "for update in graph.stream({\"messages\": [{\"role\": \"user\", \"content\": \"i like pepperoni pizza\"}]}, config, stream_mode=\"updates\"):\n",
    "    print(update)\n",
    "\n",
    "# Let's continue the conversation even further (by passing the same config) and tell the AI we live in SF\n",
    "for update in graph.stream({\"messages\": [{\"role\": \"user\", \"content\": \"i also just moved to SF\"}]}, config, stream_mode=\"updates\"):\n",
    "    print(update)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b8c416fa-086a-491d-a7d3-57091f6413e3",
   "metadata": {},
   "source": [
    "## Run graph on a different thread\n",
    "\n",
    "We can now run the graph on a different thread and see that it remembers facts about the user (specifically that the user likes pepperoni pizza and lives in SF):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "e240f025-ff8b-4d17-beb7-2420c0575dd9",
"e240f025-ff8b-4d17-beb7-2420c0575dd9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'call_model': {'messages': [AIMessage(content=\"Sure! Since you just moved to San Francisco, how about trying some popular local spots? Here are a few restaurant recommendations in SF:\\n\\n1. Tony's Pizza Napoletana - Known for their delicious pepperoni pizza!\\n2. The Slanted Door - A popular Vietnamese restaurant in the city.\\n3. Zuni Cafe - A classic American restaurant with a great ambiance.\\n4. Tartine Bakery - Perfect for a casual dinner with amazing baked goods.\\n5. State Bird Provisions - A unique dining experience with small plates and a lively atmosphere.\\n\\nFeel free to explore these options and enjoy your dinner! If you need more recommendations or information about a specific cuisine, let me know!\", additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 138, 'prompt_tokens': 197, 'total_tokens': 335}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-de8ad08c-0810-4bb5-b2e8-d3dc89522f8e-0', usage_metadata={'input_tokens': 197, 'output_tokens': 138, 'total_tokens': 335})]}}\n" + ] + } + ], + "source": [ + "config = {\"configurable\": {\"thread_id\": \"2\", \"user_id\": \"1\"}}\n", + "\n", + "for update in graph.stream({\"messages\": [{\"role\": \"user\", \"content\": \"where and what should i eat for dinner? Can you list some restaurants?\"}]}, config, stream_mode=\"updates\"):\n", + " print(update)" + ] + }, + { + "cell_type": "markdown", + "id": "091995d3", + "metadata": {}, + "source": [ + "Perfect! The AI recommended restaurants in SF, and included a pizza restaurant at the top of it's list.\n", + "\n", + "Notice that the `messages` in this new thread do NOT contain the messages from the previous thread since we didn't store them as shared values across the `user_id`. However, the `info` we saved in the previous thread was saved since we passed in the same `user_id` in this new thread.\n", + "\n", + "Let's now run the graph for another user to verify that the preferences of the first user are self contained:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "f9bf2c15", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'call_model': {'messages': [AIMessage(content='I can definitely help you with that! To provide you with personalized restaurant recommendations, could you please let me know your location or any specific preferences you have for dinner?', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 34, 'prompt_tokens': 185, 'total_tokens': 219}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a483acf-1289-4d7f-b707-97760a8c3620-0', usage_metadata={'input_tokens': 185, 'output_tokens': 34, 'total_tokens': 219})]}}\n" + ] + } + ], + "source": [ + "config = {\"configurable\": {\"thread_id\": \"3\", \"user_id\": \"2\"}}\n", + "\n", + "for update in graph.stream({\"messages\": [{\"role\": \"user\", \"content\": \"where and what should i eat for dinner? Can you list some restaurants?\"}]}, config, stream_mode=\"updates\"):\n", + " print(update)" + ] + }, + { + "cell_type": "markdown", + "id": "b7086cea", + "metadata": {}, + "source": [ + "Perfect! 
    "Perfect! The graph does not have access to the first user's preferences, so it has to ask this user for their location and dietary preferences."
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}