From 1211ba22999490d70308465d2016b09e79c3c494 Mon Sep 17 00:00:00 2001
From: Matt Zhou
Date: Tue, 12 Nov 2024 17:41:32 -0800
Subject: [PATCH] add test

---
 letta/memory.py                   |  3 +--
 tests/helpers/endpoints_helper.py | 36 ++++++++++++++++++++++++++++++-
 tests/test_endpoints.py           | 21 ++++++++++++------
 3 files changed, 50 insertions(+), 10 deletions(-)

diff --git a/letta/memory.py b/letta/memory.py
index fa1011cb07..6fe1b8aeb5 100644
--- a/letta/memory.py
+++ b/letta/memory.py
@@ -51,7 +51,6 @@ def _format_summary_history(message_history: List[Message]):
 def summarize_messages(
     agent_state: AgentState,
     message_sequence_to_summarize: List[Message],
-    insert_acknowledgement_assistant_message: bool = True,
 ):
     """Summarize a message sequence using GPT"""
     # we need the context_window
@@ -77,7 +76,7 @@ def summarize_messages(
     )
     message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.user, text=summary_input))
 
-    llm_config_no_inner_thoughts = agent_state.llm_config.copy(deep=True)
+    llm_config_no_inner_thoughts = agent_state.llm_config.model_copy(deep=True)
     llm_config_no_inner_thoughts.put_inner_thoughts_in_kwargs = False
     response = create(
         llm_config=llm_config_no_inner_thoughts,
diff --git a/tests/helpers/endpoints_helper.py b/tests/helpers/endpoints_helper.py
index 49bb0dc1f0..2f32e67d63 100644
--- a/tests/helpers/endpoints_helper.py
+++ b/tests/helpers/endpoints_helper.py
@@ -38,7 +38,7 @@
     FunctionCall,
     Message,
 )
-from letta.utils import get_human_text, get_persona_text
+from letta.utils import get_human_text, get_persona_text, json_dumps
 from tests.helpers.utils import cleanup
 
 # Generate uuid for agent name for this example
@@ -321,6 +321,40 @@ def check_agent_edit_core_memory(filename: str) -> LettaResponse:
     return response
 
 
+def check_agent_summarize_memory_simple(filename: str) -> LettaResponse:
+    """
+    Checks that the LLM is able to summarize its memory
+    """
+    # Set up client
+    client = create_client()
+    cleanup(client=client, agent_uuid=agent_uuid)
+
+    agent_state = setup_agent(client, filename)
+
+    # Send a couple messages
+    friend_name = "Shub"
+    client.user_message(agent_id=agent_state.id, message="Hey, how's it going? What do you think about this whole shindig")
+    client.user_message(agent_id=agent_state.id, message=f"By the way, my friend's name is {friend_name}!")
+    client.user_message(agent_id=agent_state.id, message="Does the number 42 ring a bell?")
+
+    # Summarize
+    agent = client.server._get_or_load_agent(agent_id=agent_state.id)
+    agent.summarize_messages_inplace()
+    print(f"Summarization succeeded: messages[1] = \n\n{json_dumps(agent.messages[1])}\n")
+
+    response = client.user_message(agent_id=agent_state.id, message="What is my friend's name?")
+    # Basic checks
+    assert_sanity_checks(response)
+
+    # Make sure my name was repeated back to me
+    assert_invoked_send_message_with_keyword(response.messages, friend_name)
+
+    # Make sure some inner monologue is present
+    assert_inner_monologue_is_present_and_valid(response.messages)
+
+    return response
+
+
 def run_embedding_endpoint(filename):
     # load JSON file
     config_data = json.load(open(filename, "r"))
diff --git a/tests/test_endpoints.py b/tests/test_endpoints.py
index 76494c0999..575ae13b85 100644
--- a/tests/test_endpoints.py
+++ b/tests/test_endpoints.py
@@ -7,6 +7,7 @@
     check_agent_archival_memory_retrieval,
     check_agent_edit_core_memory,
     check_agent_recall_chat_memory,
+    check_agent_summarize_memory_simple,
     check_agent_uses_external_tool,
     check_first_response_is_valid_for_llm_endpoint,
     check_response_contains_keyword,
@@ -58,14 +59,14 @@ def wrapper(*args, **kwargs):
 # ======================================================================================================================
 # OPENAI TESTS
 # ======================================================================================================================
-def test_openai_gpt_4_returns_valid_first_message():
+def test_openai_gpt_4o_returns_valid_first_message():
     filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
     response = check_first_response_is_valid_for_llm_endpoint(filename)
     # Log out successful response
     print(f"Got successful response from client: \n\n{response}")
 
 
-def test_openai_gpt_4_returns_keyword():
+def test_openai_gpt_4o_returns_keyword():
     keyword = "banana"
     filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
     response = check_response_contains_keyword(filename, keyword=keyword)
@@ -73,41 +74,47 @@
     print(f"Got successful response from client: \n\n{response}")
 
 
-def test_openai_gpt_4_uses_external_tool():
+def test_openai_gpt_4o_uses_external_tool():
     filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
     response = check_agent_uses_external_tool(filename)
     # Log out successful response
     print(f"Got successful response from client: \n\n{response}")
 
 
-def test_openai_gpt_4_recall_chat_memory():
+def test_openai_gpt_4o_recall_chat_memory():
     filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
     response = check_agent_recall_chat_memory(filename)
     # Log out successful response
     print(f"Got successful response from client: \n\n{response}")
 
 
-def test_openai_gpt_4_archival_memory_retrieval():
+def test_openai_gpt_4o_archival_memory_retrieval():
     filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
     response = check_agent_archival_memory_retrieval(filename)
     # Log out successful response
     print(f"Got successful response from client: \n\n{response}")
 
 
-def test_openai_gpt_4_archival_memory_insert():
+def test_openai_gpt_4o_archival_memory_insert():
     filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
     response = check_agent_archival_memory_insert(filename)
     # Log out successful response
     print(f"Got successful response from client: \n\n{response}")
 
 
-def test_openai_gpt_4_edit_core_memory():
+def test_openai_gpt_4o_edit_core_memory():
     filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
     response = check_agent_edit_core_memory(filename)
     # Log out successful response
     print(f"Got successful response from client: \n\n{response}")
 
 
+def test_openai_gpt_4o_summarize_memory():
+    filename = os.path.join(llm_config_dir, "openai-gpt-4o.json")
+    response = check_agent_summarize_memory_simple(filename)
+    print(f"Got successful response from client: \n\n{response}")
+
+
 def test_embedding_endpoint_openai():
     filename = os.path.join(embedding_config_dir, "openai_embed.json")
     run_embedding_endpoint(filename)
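
A note on the letta/memory.py hunk: the switch from .copy(deep=True) to
.model_copy(deep=True) tracks Pydantic v2, where BaseModel.copy() is deprecated
in favor of model_copy() with the same deep-copy semantics. A minimal sketch of
the pattern, assuming letta's LLMConfig is a Pydantic v2 model (SimpleConfig
below is a hypothetical stand-in, not the real letta class):

    # Minimal sketch, assuming Pydantic v2. SimpleConfig is a hypothetical
    # stand-in for letta's LLMConfig.
    from pydantic import BaseModel

    class SimpleConfig(BaseModel):
        model: str = "gpt-4o"
        put_inner_thoughts_in_kwargs: bool = True

    config = SimpleConfig()
    # deep=True also duplicates nested models, so mutating the copy cannot
    # leak back into the agent's original config
    no_inner_thoughts = config.model_copy(deep=True)
    no_inner_thoughts.put_inner_thoughts_in_kwargs = False
    assert config.put_inner_thoughts_in_kwargs is True  # original untouched

Once the patch is applied, the new test can be run on its own, e.g. with
pytest tests/test_endpoints.py::test_openai_gpt_4o_summarize_memory.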