diff --git a/superagi/agent/agent_iteration_step_handler.py b/superagi/agent/agent_iteration_step_handler.py index 00d73b8c6..a0e26bdf1 100644 --- a/superagi/agent/agent_iteration_step_handler.py +++ b/superagi/agent/agent_iteration_step_handler.py @@ -82,7 +82,7 @@ def execute_step(self): assistant_reply = response['content'] output_handler = get_output_handler(iteration_workflow_step.output_type, agent_execution_id=self.agent_execution_id, - agent_config=agent_config, agent_tools=agent_tools) + agent_config=agent_config,memory=self.memory,agent_tools=agent_tools) response = output_handler.handle(self.session, assistant_reply) if response.status == "COMPLETE": execution.status = "COMPLETED" @@ -153,7 +153,7 @@ def _build_tools(self, agent_config: dict, agent_execution_config: dict): agent_tools.append(tool_builder.build_tool(tool)) agent_tools = [tool_builder.set_default_params_tool(tool, agent_config, agent_execution_config, - model_api_key, resource_summary) for tool in agent_tools] + model_api_key, resource_summary,self.memory) for tool in agent_tools] return agent_tools def _handle_wait_for_permission(self, agent_execution, agent_config: dict, agent_execution_config: dict, @@ -179,7 +179,7 @@ def _handle_wait_for_permission(self, agent_execution, agent_config: dict, agent return False if agent_execution_permission.status == "APPROVED": agent_tools = self._build_tools(agent_config, agent_execution_config) - tool_output_handler = ToolOutputHandler(self.agent_execution_id, agent_config, agent_tools) + tool_output_handler = ToolOutputHandler(self.agent_execution_id, agent_config, agent_tools,self.memory) tool_result = tool_output_handler.handle_tool_response(self.session, agent_execution_permission.assistant_reply) result = tool_result.result diff --git a/superagi/agent/agent_tool_step_handler.py b/superagi/agent/agent_tool_step_handler.py index 673eb8fc2..46be7aa7f 100644 --- a/superagi/agent/agent_tool_step_handler.py +++ 
b/superagi/agent/agent_tool_step_handler.py @@ -55,7 +55,7 @@ def execute_step(self): assistant_reply = self._process_input_instruction(agent_config, agent_execution_config, step_tool, workflow_step) tool_obj = self._build_tool_obj(agent_config, agent_execution_config, step_tool.tool_name) - tool_output_handler = ToolOutputHandler(self.agent_execution_id, agent_config, [tool_obj], + tool_output_handler = ToolOutputHandler(self.agent_execution_id, agent_config, [tool_obj],self.memory, output_parser=AgentSchemaToolOutputParser()) final_response = tool_output_handler.handle(self.session, assistant_reply) step_response = "default" @@ -119,7 +119,7 @@ def _build_tool_obj(self, agent_config, agent_execution_config, tool_name: str): tool = self.session.query(Tool).filter(Tool.name == tool_name).first() tool_obj = tool_builder.build_tool(tool) tool_obj = tool_builder.set_default_params_tool(tool_obj, agent_config, agent_execution_config, model_api_key, - resource_summary) + resource_summary,self.memory) return tool_obj def _process_output_instruction(self, final_response: str, step_tool: AgentWorkflowStepTool, diff --git a/superagi/agent/output_handler.py b/superagi/agent/output_handler.py index 97cf65814..8683007db 100644 --- a/superagi/agent/output_handler.py +++ b/superagi/agent/output_handler.py @@ -1,12 +1,15 @@ +import json from superagi.agent.common_types import TaskExecutorResponse, ToolExecutorResponse from superagi.agent.output_parser import AgentSchemaOutputParser from superagi.agent.task_queue import TaskQueue from superagi.agent.tool_executor import ToolExecutor from superagi.helper.json_cleaner import JsonCleaner from superagi.lib.logger import logger +from langchain.text_splitter import TokenTextSplitter from superagi.models.agent import Agent from superagi.models.agent_execution import AgentExecution from superagi.models.agent_execution_feed import AgentExecutionFeed +from superagi.vector_store.base import VectorStore import numpy as np from 
def add_text_to_memory(self, assistant_reply, tool_response_result):
    """Persist the assistant's task description plus the tool response in
    long-term memory (the vector store) so later steps can retrieve it.

    Args:
        assistant_reply (str): JSON reply from the LLM; expected to contain
            ``thoughts.text`` with the task description.
        tool_response_result (str): Raw result string returned by the tool.

    Returns:
        None
    """
    # No long-term memory configured for this execution: nothing to do.
    if self.memory is None:
        return
    try:
        # The reply has been through the output parser, but LLM output can
        # still be malformed JSON or miss keys; memory book-keeping must
        # never crash the agent step, so fail soft and log instead.
        task_description = json.loads(assistant_reply)["thoughts"]["text"]
    except (ValueError, KeyError, TypeError) as exc:
        logger.error(f"add_text_to_memory: could not parse assistant reply: {exc}")
        return
    prompt = task_description + tool_response_result
    text_splitter = TokenTextSplitter(chunk_size=1024, chunk_overlap=10)
    chunks = text_splitter.split_text(prompt)
    # Tag every chunk with the execution it belongs to so retrieval can be
    # filtered per agent execution.
    metadatas = [{"agent_execution_id": self.agent_execution_id} for _ in chunks]
    self.memory.add_texts(chunks, metadatas)
b/superagi/agent/tool_builder.py index 33a0395de..f7898a129 100644 --- a/superagi/agent/tool_builder.py +++ b/superagi/agent/tool_builder.py @@ -31,6 +31,7 @@ def __init__(self, session, agent_id: int, agent_execution_id: int = None): self.session = session self.agent_id = agent_id self.agent_execution_id = agent_execution_id + def __validate_filename(self, filename): """ @@ -78,7 +79,7 @@ def build_tool(self, tool: Tool): return new_object def set_default_params_tool(self, tool, agent_config, agent_execution_config, model_api_key: str, - resource_summary: str = ""): + resource_summary: str = "",memory=None): """ Set the default parameters for the tools. @@ -110,7 +111,7 @@ def set_default_params_tool(self, tool, agent_config, agent_execution_config, mo agent_execution_id=self.agent_execution_id) if hasattr(tool, 'tool_response_manager'): tool.tool_response_manager = ToolResponseQueryManager(session=self.session, - agent_execution_id=self.agent_execution_id) + agent_execution_id=self.agent_execution_id,memory=memory) if tool.name == "QueryResourceTool": tool.description = tool.description.replace("{summary}", resource_summary) diff --git a/superagi/jobs/agent_executor.py b/superagi/jobs/agent_executor.py index 0f15cd869..08b7f94d5 100644 --- a/superagi/jobs/agent_executor.py +++ b/superagi/jobs/agent_executor.py @@ -18,6 +18,8 @@ from superagi.types.vector_store_types import VectorStoreType from superagi.vector_store.embedding.openai import OpenAiEmbedding from superagi.vector_store.vector_factory import VectorFactory +from superagi.vector_store.redis import Redis +from superagi.config.config import get_config # from superagi.helper.tool_helper import get_tool_config_by_key @@ -54,11 +56,11 @@ def execute_next_step(self, agent_execution_id): model_api_key = AgentConfiguration.get_model_api_key(session, agent_execution.agent_id, agent_config["model"]) model_llm_source = ModelSourceType.get_model_source_from_model(agent_config["model"]).value try: - vector_store_type 
= VectorStoreType.get_vector_store_type(agent_config["LTM_DB"]) + vector_store_type = VectorStoreType.get_vector_store_type(get_config("LTM_DB","Redis")) memory = VectorFactory.get_vector_storage(vector_store_type, "super-agent-index1", AgentExecutor.get_embedding(model_llm_source, model_api_key)) except: - logger.info("Unable to setup the pinecone connection...") + logger.info("Unable to setup the connection...") memory = None agent_workflow_step = session.query(AgentWorkflowStep).filter( diff --git a/superagi/tools/thinking/prompts/thinking.txt b/superagi/tools/thinking/prompts/thinking.txt index c5fdb002b..ed623e61a 100644 --- a/superagi/tools/thinking/prompts/thinking.txt +++ b/superagi/tools/thinking/prompts/thinking.txt @@ -7,6 +7,9 @@ and the following task, `{task_description}`. Below is last tool response: `{last_tool_response}` +Below is the relevant tool response: +`{relevant_tool_response}` + Perform the task by understanding the problem, extracting variables, and being smart and efficient. Provide a descriptive response, make decisions yourself when confronted with choices and provide reasoning for ideas / decisions. 
\ No newline at end of file diff --git a/superagi/tools/thinking/tools.py b/superagi/tools/thinking/tools.py index 2c2e0b869..8bdc38ca5 100644 --- a/superagi/tools/thinking/tools.py +++ b/superagi/tools/thinking/tools.py @@ -33,6 +33,7 @@ class ThinkingTool(BaseTool): ) args_schema: Type[ThinkingSchema] = ThinkingSchema goals: List[str] = [] + agent_execution_id:int=None permission_required: bool = False tool_response_manager: Optional[ToolResponseQueryManager] = None @@ -56,6 +57,9 @@ def _execute(self, task_description: str): prompt = prompt.replace("{task_description}", task_description) last_tool_response = self.tool_response_manager.get_last_response() prompt = prompt.replace("{last_tool_response}", last_tool_response) + metadata = {"agent_execution_id":self.agent_execution_id} + relevant_tool_response = self.tool_response_manager.get_relevant_response(query=task_description,metadata=metadata) + prompt = prompt.replace("{relevant_tool_response}",relevant_tool_response) messages = [{"role": "system", "content": prompt}] result = self.llm.chat_completion(messages, max_tokens=self.max_token_limit) return result["content"] diff --git a/superagi/tools/tool_response_query_manager.py b/superagi/tools/tool_response_query_manager.py index 5a2387648..c7e952411 100644 --- a/superagi/tools/tool_response_query_manager.py +++ b/superagi/tools/tool_response_query_manager.py @@ -1,12 +1,22 @@ from sqlalchemy.orm import Session from superagi.models.agent_execution_feed import AgentExecutionFeed +from superagi.vector_store.base import VectorStore class ToolResponseQueryManager: - def __init__(self, session: Session, agent_execution_id: int): + def __init__(self, session: Session, agent_execution_id: int,memory:VectorStore): self.session = session self.agent_execution_id = agent_execution_id - + self.memory=memory + + def get_last_response(self, tool_name: str = None): return AgentExecutionFeed.get_last_tool_response(self.session, self.agent_execution_id, tool_name) + + def 
DOC_PREFIX = "doc:"

CONTENT_KEY = "content"
METADATA_KEY = "metadata"
VECTOR_SCORE_KEY = "vector_score"


class Redis(VectorStore):
    """Redis (RediSearch) backed vector store.

    Each chunk is stored as a Redis hash carrying the raw text, a float32
    embedding vector, and a JSON-encoded metadata blob; retrieval is a KNN
    search over the vector field.
    """

    # Characters that must be escaped inside a RediSearch tag filter value.
    DEFAULT_ESCAPED_CHARS = r"[,.<>{}\[\]\\\"\':;!@#$%^&*()\-+=~\/ ]"

    def __init__(self, index: Any, embedding_model: Any):
        """
        Args:
            index: Name of the Redis search index.
            embedding_model: An instance of a BaseEmbedding model.
        """
        redis_url = get_config('REDIS_URL')
        self.redis_client = redis.Redis.from_url("redis://" + redis_url + "/0",
                                                 decode_responses=True)
        self.index = index
        self.embedding_model = embedding_model
        # Fix: the original line ended with a trailing comma, which silently
        # made content_key the 1-tuple ("content",) instead of a string.
        self.content_key = "content"
        self.metadata_key = "metadata"
        self.vector_key = "content_vector"

    def delete_embeddings_from_vector_db(self, ids: List[str]) -> None:
        # Not implemented for the Redis backend yet.
        pass

    def add_embeddings_to_vector_db(self, embeddings: dict) -> None:
        # Not implemented for the Redis backend yet.
        pass

    def get_index_stats(self) -> dict:
        # Not implemented for the Redis backend yet.
        pass

    def build_redis_key(self, prefix: str) -> str:
        """Return a unique redis key under the given prefix."""
        return f"{prefix}:{uuid.uuid4().hex}"

    def add_texts(self, texts: Iterable[str],
                  metadatas: Optional[List[dict]] = None,
                  embeddings: Optional[List[List[float]]] = None,
                  ids: Optional[List[str]] = None,
                  **kwargs: Any) -> List[str]:
        """Embed and store the given texts.

        Returns:
            The list of Redis keys written (one per input text).
        """
        pipe = self.redis_client.pipeline()
        prefix = DOC_PREFIX + str(self.index)
        keys = []
        for i, text in enumerate(texts):
            key = ids[i] if ids else self.build_redis_key(prefix)
            metadata = metadatas[i] if metadatas else {}
            embedding = self.embedding_model.get_embedding(text)
            embedding_arr = np.array(embedding, dtype=np.float32)
            pipe.hset(key, mapping={CONTENT_KEY: text,
                                    self.vector_key: embedding_arr.tobytes(),
                                    METADATA_KEY: json.dumps(metadata)})
            keys.append(key)
        pipe.execute()
        return keys

    def get_matching_text(self, query: str, top_k: int = 5,
                          metadata: Optional[dict] = None, **kwargs: Any) -> dict:
        """KNN-search the index for texts similar to *query*.

        Returns:
            A dict with a single "documents" key holding a list of Document.
            (Annotation fixed: the original declared List[Document] but
            always returned this dict shape.)
        """
        from redis.commands.search.query import Query

        embed_text = self.embedding_model.get_embedding(query)
        hybrid_fields = self._convert_to_redis_filters(metadata)
        base_query = f"{hybrid_fields}=>[KNN {top_k} @{self.vector_key} $vector AS vector_score]"
        return_fields = [METADATA_KEY, CONTENT_KEY, "vector_score", "id"]

        knn_query = (
            Query(base_query)
            .return_fields(*return_fields)
            .sort_by("vector_score")
            .paging(0, top_k)
            .dialect(2)
        )
        params_dict: Mapping[str, str] = {
            "vector": np.array(embed_text).astype(dtype=np.float32).tobytes()
        }

        results = self.redis_client.ft(self.index).search(knn_query, params_dict)

        documents = [Document(text_content=result.content,
                              metadata=json.loads(result.metadata))
                     for result in results.docs]
        return {"documents": documents}

    def _convert_to_redis_filters(self, metadata: Optional[dict] = None) -> str:
        """Build a RediSearch tag-filter expression from a metadata dict."""
        if not metadata:
            return "*"
        filter_strings = ["@%s:{%s}" % (key, self.escape_token(str(value)))
                          for key, value in metadata.items()]
        # NOTE(review): RediSearch intersections are normally space-separated;
        # confirm the " & " join is accepted by the target Redis version.
        return "(" + " & ".join(filter_strings) + ")"

    def create_index(self):
        """Create the search index if it does not already exist."""
        try:
            info = self.redis_client.ft(self.index).info()
            logger.info(info)
            logger.info("Index already exists!")
            return
        except Exception:
            # ft().info() raises when the index is missing; fall through
            # and create it below.
            pass

        # Embed a sample string once to discover the vector dimensionality.
        sample_embedding = self.embedding_model.get_embedding("sample")
        schema = (
            TagField("tag"),
            VectorField(self.vector_key,
                        "FLAT", {                       # FLAT or HNSW index
                            "TYPE": "FLOAT32",
                            "DIM": len(sample_embedding),
                            "DISTANCE_METRIC": "COSINE",
                        })
        )
        definition = IndexDefinition(prefix=[DOC_PREFIX], index_type=IndexType.HASH)
        self.redis_client.ft(self.index).create_index(fields=schema, definition=definition)

    def escape_token(self, value: str) -> str:
        """Escape punctuation within an input string (taken from RedisOM Python).

        Args:
            value (str): The input string.

        Returns:
            str: The escaped string.
        """
        escaped_chars_re = re.compile(Redis.DEFAULT_ESCAPED_CHARS)

        def escape_symbol(match: re.Match) -> str:
            return f"\\{match.group(0)}"

        return escaped_chars_re.sub(escape_symbol, value)
@patch('superagi.agent.output_handler.TokenTextSplitter')
def test_add_text_to_memory(splitter_cls_mock):
    """add_text_to_memory chunks reply+tool-result and stores every chunk
    tagged with the owning execution id."""
    execution_id = 1
    handler = ToolOutputHandler(execution_id, {"agent_id": 2}, [], None)

    reply = '{"thoughts": {"text": "This is a task."}}'
    tool_result = '["Task completed."]'

    splitter = MagicMock()
    splitter_cls_mock.return_value = splitter
    splitter.split_text.return_value = ["This is a task.", "Task completed."]

    # Stand-in for the VectorStore long-term memory.
    handler.memory = MagicMock()

    handler.add_text_to_memory(reply, tool_result)

    splitter_cls_mock.assert_called_once_with(chunk_size=1024, chunk_overlap=10)
    splitter.split_text.assert_called_once_with('This is a task.["Task completed."]')
    expected_metadatas = [{"agent_execution_id": execution_id}] * 2
    handler.memory.add_texts.assert_called_once_with(
        ["This is a task.", "Task completed."], expected_metadatas)
@patch('redis.Redis')
def test_add_texts(redis_cls_mock):
    """Exactly one pipelined hset should be queued per input text."""
    store = Redis("mock_index", MagicMock())
    store.build_redis_key = MagicMock(return_value="mock_key")

    texts = ["Hello", "World"]
    metadatas = [{"data": 1}, {"data": 2}]

    store.add_texts(texts, metadatas)

    assert store.redis_client.pipeline().hset.call_count == len(texts)


@patch('redis.Redis')
def test_get_matching_text(redis_cls_mock):
    """The query text is embedded exactly once and a documents dict comes back."""
    store = Redis("mock_index", None)
    store.embedding_model = MagicMock()
    store.embedding_model.get_embedding.return_value = np.array([0.1, 0.2, 0.3])

    result = store.get_matching_text("mock_query", metadata={})

    store.embedding_model.get_embedding.assert_called_once_with("mock_query")
    assert "documents" in result