diff --git a/examples/Building agents with Letta.ipynb b/examples/Building agents with Letta.ipynb
index 1acf0f578e..7503785f71 100644
--- a/examples/Building agents with Letta.ipynb
+++ b/examples/Building agents with Letta.ipynb
@@ -13,16 +13,6 @@
     "4. Building agentic RAG with MemGPT "
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "f096bd03-9fb7-468f-af3c-24cd9e03108c",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from helper import nb_print"
-   ]
-  },
   {
    "cell_type": "markdown",
    "id": "aad3a8cc-d17a-4da1-b621-ecc93c9e2106",
@@ -62,9 +52,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from letta.schemas.llm_config import LLMConfig\n",
+    "from letta import LLMConfig, EmbeddingConfig\n",
     "\n",
-    "client.set_default_llm_config(LLMConfig.default_config(\"gpt-4o-mini\")) "
+    "client.set_default_llm_config(LLMConfig.default_config(\"gpt-4o-mini\")) \n",
+    "client.set_default_embedding_config(EmbeddingConfig.default_config(provider=\"openai\")) "
    ]
   },
   {
@@ -124,7 +115,7 @@
     "    message=\"hello!\", \n",
     "    role=\"user\" \n",
     ")\n",
-    "nb_print(response.messages)"
+    "response"
    ]
   },
   {
@@ -257,7 +248,7 @@
     "    message = \"My name is actually Bob\", \n",
     "    role = \"user\"\n",
     ") \n",
-    "nb_print(response.messages)"
+    "response"
    ]
   },
   {
@@ -291,7 +282,7 @@
     "    message = \"In the future, never use emojis to communicate\", \n",
     "    role = \"user\"\n",
     ") \n",
-    "nb_print(response.messages)"
+    "response"
    ]
   },
   {
@@ -353,7 +344,7 @@
     "    message = \"Save the information that 'bob loves cats' to archival\", \n",
     "    role = \"user\"\n",
     ") \n",
-    "nb_print(response.messages)"
+    "response"
    ]
   },
   {
@@ -407,15 +398,23 @@
     "    role=\"user\", \n",
     "    message=\"What animals do I like? Search archival.\"\n",
     ")\n",
-    "nb_print(response.messages)"
+    "response"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "adc394c8-1d88-42bf-a6a5-b01f20f78d81",
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "letta",
+   "display_name": "letta-main",
    "language": "python",
-   "name": "letta"
+   "name": "letta-main"
   },
   "language_info": {
    "codemirror_mode": {
@@ -427,7 +426,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.12.2"
+   "version": "3.12.6"
   }
  },
  "nbformat": 4,
diff --git a/letta/agent.py b/letta/agent.py
index f49351d753..e1939ffb46 100644
--- a/letta/agent.py
+++ b/letta/agent.py
@@ -248,9 +248,11 @@ def __init__(
         # initialize a tool rules solver
         if agent_state.tool_rules:
             # if there are tool rules, print out a warning
-            warnings.warn("Tool rules only work reliably for the latest OpenAI models that support structured outputs.")
+            for rule in agent_state.tool_rules:
+                if not isinstance(rule, TerminalToolRule):
+                    warnings.warn("Tool rules only work reliably for the latest OpenAI models that support structured outputs.")
+                    break
         # add default rule for having send_message be a terminal tool
-
         if agent_state.tool_rules is None:
             agent_state.tool_rules = []
         # Define the rule to add
diff --git a/letta/schemas/letta_response.py b/letta/schemas/letta_response.py
index ef66372611..db390b9f4c 100644
--- a/letta/schemas/letta_response.py
+++ b/letta/schemas/letta_response.py
@@ -1,3 +1,6 @@
+import html
+import json
+import re
 from typing import List, Union
 
 from pydantic import BaseModel, Field
@@ -34,6 +37,113 @@ def __str__(self):
             indent=4,
         )
 
+    def _repr_html_(self):
+        def get_formatted_content(msg):
+            if msg.message_type == "internal_monologue":
+                return f'<div class="content"><span class="internal-monologue">{html.escape(msg.internal_monologue)}</span></div>'
+            elif msg.message_type == "function_call":
+                args = format_json(msg.function_call.arguments)
+                return f'<div class="content"><span class="function-name">{html.escape(msg.function_call.name)}</span>({args})</div>'
+            elif msg.message_type == "function_return":
+
+                return_value = format_json(msg.function_return)
+                # return f'<div class="status-line">Status: {html.escape(msg.status)}</div><div class="content">{return_value}</div>'
+                return f'<div class="content">{return_value}</div>'
+            elif msg.message_type == "user_message":
+                if is_json(msg.message):
+                    return f'<div class="content">{format_json(msg.message)}</div>'
+                else:
+                    return f'<div class="content">{html.escape(msg.message)}</div>'
+            elif msg.message_type in ["assistant_message", "system_message"]:
+                return f'<div class="content">{html.escape(msg.message)}</div>'
+            else:
+                return f'<div class="content">{html.escape(str(msg))}</div>'
+
+        def is_json(string):
+            try:
+                json.loads(string)
+                return True
+            except ValueError:
+                return False
+
+        def format_json(json_str):
+            try:
+                parsed = json.loads(json_str)
+                formatted = json.dumps(parsed, indent=2, ensure_ascii=False)
+                formatted = formatted.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+                formatted = formatted.replace("\n", "<br>").replace(" ", "&nbsp;&nbsp;")
+                formatted = re.sub(r'(".*?"):', r'<span class="json-key">\1</span>:', formatted)
+                formatted = re.sub(r': (".*?")', r': <span class="json-string">\1</span>', formatted)
+                formatted = re.sub(r": (\d+)", r': <span class="json-number">\1</span>', formatted)
+                formatted = re.sub(r": (true|false)", r': <span class="json-boolean">\1</span>', formatted)
+                return formatted
+            except json.JSONDecodeError:
+                return html.escape(json_str)
+
+        html_output = """
+        <style>
+            .message-container, .usage-container {
+                font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
+                max-width: 800px;
+                margin: 20px auto;
+                background-color: #1e1e1e;
+                border-radius: 8px;
+                overflow: hidden;
+                color: #d4d4d4;
+            }
+            .message, .usage-stats {
+                padding: 10px 15px;
+                border-bottom: 1px solid #3a3a3a;
+            }
+            .message:last-child, .usage-stats:last-child {
+                border-bottom: none;
+            }
+            .title {
+                font-weight: bold;
+                margin-bottom: 5px;
+                color: #ffffff;
+                text-transform: uppercase;
+                font-size: 0.9em;
+            }
+            .content {
+                background-color: #2d2d2d;
+                border-radius: 4px;
+                padding: 5px 10px;
+                font-family: 'Consolas', 'Courier New', monospace;
+                white-space: pre-wrap;
+            }
+            .json-key, .function-name, .json-boolean { color: #9cdcfe; }
+            .json-string { color: #ce9178; }
+            .json-number { color: #b5cea8; }
+            .internal-monologue { font-style: italic; }
+        </style>
+        <div class="message-container">
+        """
+
+        for msg in self.messages:
+            content = get_formatted_content(msg)
+            title = msg.message_type.replace("_", " ").upper()
+            html_output += f"""
+            <div class="message {msg.message_type}">
+                <div class="title">{title}</div>
+                {content}
+            </div>
+            """
+        html_output += "</div>"
+
+        # Formatting the usage statistics
+        usage_html = json.dumps(self.usage.model_dump(), indent=2)
+        html_output += f"""
+        <div class="usage-container">
+            <div class="usage-stats">
+                <div class="title">USAGE STATISTICS</div>
+                <div class="content">{format_json(usage_html)}</div>
+            </div>
+        </div>
+        """
+
+        return html_output
+
 
 # The streaming response is either [DONE], [DONE_STEP], [DONE], an error, or a LettaMessage
 LettaStreamingResponse = Union[LettaMessage, MessageStreamStatus, LettaUsageStatistics]