Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor(agent): Refactor resource of agents #1518

Merged
merged 6 commits into from
May 15, 2024
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 0 additions & 24 deletions .env.template
Original file line number Diff line number Diff line change
@@ -1,11 +1,6 @@
#*******************************************************************#
#** DB-GPT - GENERAL SETTINGS **#
#*******************************************************************#
## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled. Each of the below are an option:
## pilot.commands.query_execute

## For example, to disable coding related features, uncomment the next line
# DISABLED_COMMAND_CATEGORIES=

#*******************************************************************#
#** Webserver Port **#
Expand Down Expand Up @@ -125,25 +120,6 @@ LOCAL_DB_TYPE=sqlite
#*******************************************************************#
EXECUTE_LOCAL_COMMANDS=False



#*******************************************************************#
#** ALLOWLISTED PLUGINS **#
#*******************************************************************#

#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
#DENYLISTED_PLUGINS - Sets the listed plugins that are not allowed (Example: plugin1,plugin2,plugin3)
ALLOWLISTED_PLUGINS=
DENYLISTED_PLUGINS=


#*******************************************************************#
#** CHAT PLUGIN SETTINGS **#
#*******************************************************************#
# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False


#*******************************************************************#
#** VECTOR STORE SETTINGS **#
#*******************************************************************#
Expand Down
32 changes: 1 addition & 31 deletions dbgpt/_private/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,11 @@
from __future__ import annotations

import os
from typing import TYPE_CHECKING, List, Optional
from typing import TYPE_CHECKING, Optional

from dbgpt.util.singleton import Singleton

if TYPE_CHECKING:
from auto_gpt_plugin_template import AutoGPTPluginTemplate

from dbgpt.agent.plugin import CommandRegistry
from dbgpt.component import SystemApp
from dbgpt.datasource.manages import ConnectorManager

Expand Down Expand Up @@ -165,40 +162,13 @@ def __init__(self) -> None:
from dbgpt.core._private.prompt_registry import PromptTemplateRegistry

self.prompt_template_registry = PromptTemplateRegistry()
### Related configuration of built-in commands
self.command_registry: Optional[CommandRegistry] = None

disabled_command_categories = os.getenv("DISABLED_COMMAND_CATEGORIES")
if disabled_command_categories:
self.disabled_command_categories = disabled_command_categories.split(",")
else:
self.disabled_command_categories = []

self.execute_local_commands = (
os.getenv("EXECUTE_LOCAL_COMMANDS", "False").lower() == "true"
)
### Message history storage directory
self.message_dir = os.getenv("MESSAGE_HISTORY_DIR", "../../message")

### The associated configuration parameters of the plug-in control the loading and use of the plug-in

self.plugins: List["AutoGPTPluginTemplate"] = []
self.plugins_openai = [] # type: ignore
self.plugins_auto_load = os.getenv("AUTO_LOAD_PLUGIN", "True").lower() == "true"

self.plugins_git_branch = os.getenv("PLUGINS_GIT_BRANCH", "plugin_dashboard")

plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
if plugins_allowlist:
self.plugins_allowlist = plugins_allowlist.split(",")
else:
self.plugins_allowlist = []

plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
if plugins_denylist:
self.plugins_denylist = plugins_denylist.split(",")
else:
self.plugins_denylist = []
### Native SQL Execution Capability Control Configuration
self.NATIVE_SQL_CAN_RUN_DDL = (
os.getenv("NATIVE_SQL_CAN_RUN_DDL", "True").lower() == "true"
Expand Down
4 changes: 1 addition & 3 deletions dbgpt/agent/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,7 @@
from .core.profile import * # noqa: F401, F403
from .core.schema import PluginStorageType # noqa: F401
from .core.user_proxy_agent import UserProxyAgent # noqa: F401
from .resource.resource_api import AgentResource, ResourceType # noqa: F401
from .resource.resource_loader import ResourceLoader # noqa: F401
from .resource.base import AgentResource, Resource, ResourceType # noqa: F401
from .util.llm.llm import LLMConfig # noqa: F401

__ALL__ = [
Expand All @@ -38,7 +37,6 @@
"GptsMemory",
"AgentResource",
"ResourceType",
"ResourceLoader",
"PluginStorageType",
"UserProxyAgent",
]
11 changes: 5 additions & 6 deletions dbgpt/agent/core/action/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,7 @@
from dbgpt.util.json_utils import find_json_objects
from dbgpt.vis.base import Vis

from ...resource.resource_api import AgentResource, ResourceType
from ...resource.resource_loader import ResourceLoader
from ...resource.base import AgentResource, Resource, ResourceType

T = TypeVar("T", bound=Union[BaseModel, List[BaseModel], None])

Expand Down Expand Up @@ -77,11 +76,11 @@ class Action(ABC, Generic[T]):

def __init__(self):
"""Create an action."""
self.resource_loader: Optional[ResourceLoader] = None
self.resource: Optional[Resource] = None

def init_resource_loader(self, resource_loader: Optional[ResourceLoader]):
"""Initialize the resource loader."""
self.resource_loader = resource_loader
def init_resource(self, resource: Optional[Resource]):
    """Bind a resource to this action.

    Called during agent build so the action can later read the agent's
    resource when it runs.

    Args:
        resource (Optional[Resource]): The resource made available to this
            action, or None when the agent has no resource bound.
    """
    self.resource = resource

@property
def resource_need(self) -> Optional[ResourceType]:
Expand Down
2 changes: 1 addition & 1 deletion dbgpt/agent/core/action/blank_action.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import logging
from typing import Optional

from ...resource.resource_api import AgentResource
from ...resource.base import AgentResource
from .base import Action, ActionOutput

logger = logging.getLogger(__name__)
Expand Down
2 changes: 0 additions & 2 deletions dbgpt/agent/core/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
from dbgpt.core import LLMClient
from dbgpt.util.annotations import PublicAPI

from ..resource.resource_loader import ResourceLoader
from .action.base import ActionOutput
from .memory.agent_memory import AgentMemory

Expand Down Expand Up @@ -209,7 +208,6 @@ class AgentGenerateContext:

memory: Optional[AgentMemory] = None
agent_context: Optional[AgentContext] = None
resource_loader: Optional[ResourceLoader] = None
llm_client: Optional[LLMClient] = None

round_index: Optional[int] = None
Expand Down
4 changes: 2 additions & 2 deletions dbgpt/agent/core/agent_manage.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,15 +68,15 @@ def after_start(self):
from ..expand.code_assistant_agent import CodeAssistantAgent
from ..expand.dashboard_assistant_agent import DashboardAssistantAgent
from ..expand.data_scientist_agent import DataScientistAgent
from ..expand.plugin_assistant_agent import PluginAssistantAgent
from ..expand.summary_assistant_agent import SummaryAssistantAgent
from ..expand.tool_assistant_agent import ToolAssistantAgent

core_agents = set()
core_agents.add(self.register_agent(CodeAssistantAgent))
core_agents.add(self.register_agent(DashboardAssistantAgent))
core_agents.add(self.register_agent(DataScientistAgent))
core_agents.add(self.register_agent(SummaryAssistantAgent))
core_agents.add(self.register_agent(PluginAssistantAgent))
core_agents.add(self.register_agent(ToolAssistantAgent))
self._core_agents = core_agents

def register_agent(
Expand Down
104 changes: 39 additions & 65 deletions dbgpt/agent/core/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,16 +3,17 @@
import asyncio
import json
import logging
from typing import Any, Dict, List, Optional, Tuple, Type, cast
from concurrent.futures import Executor, ThreadPoolExecutor
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, cast

from dbgpt._private.pydantic import ConfigDict, Field
from dbgpt.core import LLMClient, ModelMessageRoleType
from dbgpt.util.error_types import LLMChatError
from dbgpt.util.executor_utils import blocking_func_to_async
from dbgpt.util.tracer import SpanType, root_tracer
from dbgpt.util.utils import colored

from ..resource.resource_api import AgentResource, ResourceClient
from ..resource.resource_loader import ResourceLoader
from ..resource.base import Resource
from ..util.llm.llm import LLMConfig, LLMStrategyType
from ..util.llm.llm_client import AIWrapper
from .action.base import Action, ActionOutput
Expand All @@ -32,12 +33,15 @@ class ConversableAgent(Role, Agent):

agent_context: Optional[AgentContext] = Field(None, description="Agent context")
actions: List[Action] = Field(default_factory=list)
resources: List[AgentResource] = Field(default_factory=list)
resource: Optional[Resource] = Field(None, description="Resource")
llm_config: Optional[LLMConfig] = None
resource_loader: Optional[ResourceLoader] = None
max_retry_count: int = 3
consecutive_auto_reply_counter: int = 0
llm_client: Optional[AIWrapper] = None
executor: Executor = Field(
default_factory=lambda: ThreadPoolExecutor(max_workers=1),
description="Executor for running tasks",
)

def __init__(self, **kwargs):
"""Create a new agent."""
Expand All @@ -58,27 +62,12 @@ def check_available(self) -> None:
f"running!"
)

# resource check
for resource in self.resources:
if (
self.resource_loader is None
or self.resource_loader.get_resource_api(
resource.type, check_instance=False
)
is None
):
raise ValueError(
f"Resource {resource.type}:{resource.value} missing resource loader"
f" implementation,unable to read resources!"
)

# action check
if self.actions and len(self.actions) > 0:
have_resource_types = [item.type for item in self.resources]
for action in self.actions:
if (
action.resource_need
and action.resource_need not in have_resource_types
if action.resource_need and (
not self.resource
or not self.resource.get_resource_by_type(action.resource_need)
):
raise ValueError(
f"{self.name}[{self.role}] Missing resources required for "
Expand Down Expand Up @@ -112,13 +101,6 @@ def not_null_agent_context(self) -> AgentContext:
raise ValueError("Agent context is not initialized!")
return self.agent_context

@property
def not_null_resource_loader(self) -> ResourceLoader:
"""Get the resource loader."""
if not self.resource_loader:
raise ValueError("Resource loader is not initialized!")
return self.resource_loader

@property
def not_null_llm_config(self) -> LLMConfig:
"""Get the LLM config."""
Expand All @@ -134,23 +116,32 @@ def not_null_llm_client(self) -> LLMClient:
raise ValueError("LLM client is not initialized!")
return llm_client

async def blocking_func_to_async(
    self, func: Callable[..., Any], *args, **kwargs
) -> Any:
    """Execute *func* without stalling the event loop.

    Coroutine functions are awaited directly; any other callable is handed
    off to the agent's executor via the module-level
    ``blocking_func_to_async`` helper (which this method intentionally
    shadows by name).

    Args:
        func (Callable[..., Any]): The callable to run; sync or async.
        *args: Positional arguments forwarded to ``func``.
        **kwargs: Keyword arguments forwarded to ``func``.

    Returns:
        Any: The result of ``func``.
    """
    if asyncio.iscoroutinefunction(func):
        return await func(*args, **kwargs)
    # Plain (potentially blocking) callable: dispatch it through the
    # imported helper so it runs inside ``self.executor``.
    return await blocking_func_to_async(self.executor, func, *args, **kwargs)

async def preload_resource(self) -> None:
    """Preload the bound resource before the agent is built.

    No-op when the agent has no resource. Otherwise the resource's
    ``preload_resource`` is dispatched through the executor helper so any
    blocking work does not stall the event loop.
    """
    # NOTE(review): this bare `pass` is dead code — it looks like a leftover
    # from an earlier no-op implementation; confirm and remove.
    pass
    if self.resource:
        # ``preload_resource`` is treated as potentially blocking, hence the
        # executor indirection rather than a direct call.
        await self.blocking_func_to_async(self.resource.preload_resource)

async def build(self) -> "ConversableAgent":
"""Build the agent."""
# Preload resources
await self.preload_resource()
# Check if agent is available
self.check_available()
_language = self.not_null_agent_context.language
if _language:
self.language = _language

# Preload resources
await self.preload_resource()
# Initialize resource loader
for action in self.actions:
action.init_resource_loader(self.resource_loader)
action.init_resource(self.resource)

# Initialize LLM Server
if not self.is_human:
Expand All @@ -175,13 +166,8 @@ def bind(self, target: Any) -> "ConversableAgent":
raise ValueError("GptsMemory is not supported!")
elif isinstance(target, AgentContext):
self.agent_context = target
elif isinstance(target, ResourceLoader):
self.resource_loader = target
elif isinstance(target, list) and target and len(target) > 0:
if _is_list_of_type(target, Action):
self.actions.extend(target)
elif _is_list_of_type(target, AgentResource):
self.resources = target
elif isinstance(target, Resource):
self.resource = target
elif isinstance(target, AgentMemory):
self.memory = target
return self
Expand Down Expand Up @@ -480,12 +466,12 @@ async def act(
last_out: Optional[ActionOutput] = None
for i, action in enumerate(self.actions):
# Select the resources required by the action
need_resource = None
if self.resources and len(self.resources) > 0:
for item in self.resources:
if item.type == action.resource_need:
need_resource = item
break
if action.resource_need and self.resource:
need_resources = self.resource.get_resource_by_type(
action.resource_need
)
else:
need_resources = []

if not message:
raise ValueError("The message content is empty!")
Expand All @@ -497,7 +483,7 @@ async def act(
"sender": sender.name if sender else None,
"recipient": self.name,
"reviewer": reviewer.name if reviewer else None,
"need_resource": need_resource.to_dict() if need_resource else None,
"need_resource": need_resources[0].name if need_resources else None,
"rely_action_out": last_out.to_dict() if last_out else None,
"conv_uid": self.not_null_agent_context.conv_id,
"action_index": i,
Expand All @@ -506,7 +492,7 @@ async def act(
) as span:
last_out = await action.run(
ai_message=message,
resource=need_resource,
resource=None,
rely_action_out=last_out,
**kwargs,
)
Expand Down Expand Up @@ -703,23 +689,11 @@ async def generate_resource_variables(
self, question: Optional[str] = None
) -> Dict[str, Any]:
"""Generate the resource variables."""
resource_prompt_list = []
for item in self.resources:
resource_client = self.not_null_resource_loader.get_resource_api(
item.type, ResourceClient
resource_prompt = None
if self.resource:
resource_prompt = await self.resource.get_prompt(
lang=self.language, question=question
)
if not resource_client:
raise ValueError(
f"Resource {item.type}:{item.value} missing resource loader"
f" implementation,unable to read resources!"
)
resource_prompt_list.append(
await resource_client.get_resource_prompt(item, question)
)

resource_prompt = ""
if len(resource_prompt_list) > 0:
resource_prompt = "RESOURCES:" + "\n".join(resource_prompt_list)

out_schema: Optional[str] = ""
if self.actions and len(self.actions) > 0:
Expand Down
Loading
Loading