Skip to content

Commit

Permalink
Merge pull request #41 from plastic-labs/agent-abstraction
Browse files Browse the repository at this point in the history
Agent abstraction
  • Loading branch information
vintrocode authored Aug 3, 2023
2 parents d60a0e5 + 6c236ac commit f24a8fb
Show file tree
Hide file tree
Showing 20 changed files with 135 additions and 56 deletions.
9 changes: 3 additions & 6 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,9 @@ RUN poetry config virtualenvs.create false \
RUN addgroup --system app && adduser --system --group app
USER app

COPY app.py .
COPY chain.py .
COPY globals.py .
COPY cache.py .
COPY discord_gateway.py .
COPY agent/ agent/
COPY bot/ bot/
COPY data/ data/

# https://stackoverflow.com/questions/29663459/python-app-does-not-print-anything-when-running-detached-in-docker
CMD ["python", "-u", "app.py"]
CMD ["python", "-u", "discord_gateway.py"]
21 changes: 20 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ This app requires you to have a few different environment variables set. Create
**BOT_TOKEN**: This is the discord bot token. You can find instructions on how to create a bot and generate a token in the [pycord docs](https://guide.pycord.dev/getting-started/creating-your-first-bot).
**THOUGHT_CHANNEL_ID**: This is the discord channel for the bot to output thoughts to. Make a channel in your server and copy the ID by right clicking the channel and copying the link. The channel ID is the last string of numbers in the link.

## Docker/Containerization
### Docker/Containerization

The repository contains a `Dockerfile` for running the bot in a containerized workflow. Use the following command to build and run the container locally:

Expand All @@ -28,3 +28,22 @@ docker run --env-file .env tutor-gpt
```

The current behaviour will utilize the `.env` file in your local repository and run the bot.

### Architecture

Below is a high-level diagram of the architecture for the bot.
![Tutor-GPT Discord Architecture](assets/bloombot_langchain_diagram.png)

## Contributing

This project uses [poetry](https://python-poetry.org/) to manage dependencies.
To install dependencies locally, run `poetry install`. Alternatively, run
`poetry shell` to activate the virtual environment.

To activate the virtual environment within the same shell you can use the
following one-liner

```bash
source $(poetry env info --path)/bin/activate
```

Empty file added agent/__init__.py
Empty file.
34 changes: 33 additions & 1 deletion cache.py → agent/cache.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,40 @@
"""
Below is an implementation of a basic LRUcache that utilizes the built
in OrderedDict data structure.
"""
from collections import OrderedDict

class LRUCache:
    """A least-recently-used (LRU) cache built on collections.OrderedDict.

    The OrderedDict remembers insertion order; the *end* of the dict is
    treated as "most recently used" and the *front* as "least recently
    used", so eviction is always `popitem(last=False)`.
    """

    def __init__(self, capacity):
        # Maximum number of entries retained. A non-positive capacity
        # yields a cache that stores nothing.
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        """Return the value stored under `key`, or None if absent.

        A successful lookup refreshes the key's recency.
        """
        if key not in self.cache:
            return None

        # Move the accessed key to the end to mark it most recently used
        self.cache.move_to_end(key)
        return self.cache[key]

    def put(self, key, value):
        """Insert or update `key` with `value`, evicting the least
        recently used entry when the cache is at capacity.
        """
        if self.capacity <= 0:
            # Bug fix: the original popped from an empty OrderedDict when
            # capacity was 0, raising KeyError on the first put. A
            # zero-capacity cache simply stores nothing.
            return
        if key in self.cache:
            # If the key already exists, refresh its recency before the
            # value is overwritten below
            self.cache.move_to_end(key)
        elif len(self.cache) >= self.capacity:
            # Cache full: evict the least recently used entry (the first
            # item in the OrderedDict)
            self.cache.popitem(last=False)

        # Add or update the key-value pair at the end of the OrderedDict
        self.cache[key] = value

"""
In this implementation, the _move_to_front method is used to move a key to the front of
the linked list when it is accessed. The _add_to_front method is used to add a new key to the
front of the linked list. The _remove_least_recently_used method is used to remove the least recently
used key from the cache when the capacity is exceeded. The _remove_node method is a helper method that removes a node from the doubly linked list.
"""
class LRUCache:
def __init__(self, capacity):
self.capacity = capacity
Expand Down Expand Up @@ -93,3 +124,4 @@ def _remove_node(self, node):
else:
node['next']['prev'] = node['prev']
"""
14 changes: 9 additions & 5 deletions chain.py → agent/chain.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,15 @@

load_dotenv()


OBJECTIVE_SYSTEM_THOUGHT = load_prompt("data/prompts/objective/system/thought.yaml")
OBJECTIVE_SYSTEM_RESPONSE = load_prompt("data/prompts/objective/system/response.yaml")
OBJECTIVE_HUMAN_THOUGHT = load_prompt("data/prompts/objective/human/thought.yaml")
OBJECTIVE_HUMAN_RESPONSE = load_prompt("data/prompts/objective/human/response.yaml")
OBJECTIVE_SYSTEM_THOUGHT = load_prompt(os.path.join(os.path.dirname(__file__), 'data/prompts/objective/system/thought.yaml'))
OBJECTIVE_SYSTEM_RESPONSE = load_prompt(os.path.join(os.path.dirname(__file__), 'data/prompts/objective/system/response.yaml'))
OBJECTIVE_HUMAN_THOUGHT = load_prompt(os.path.join(os.path.dirname(__file__), 'data/prompts/objective/human/thought.yaml'))
OBJECTIVE_HUMAN_RESPONSE = load_prompt(os.path.join(os.path.dirname(__file__), 'data/prompts/objective/human/response.yaml'))

# OBJECTIVE_SYSTEM_THOUGHT = load_prompt("./data/prompts/objective/system/thought.yaml")
# OBJECTIVE_SYSTEM_RESPONSE = load_prompt("./data/prompts/objective/system/response.yaml")
# OBJECTIVE_HUMAN_THOUGHT = load_prompt("./data/prompts/objective/human/thought.yaml")
# OBJECTIVE_HUMAN_RESPONSE = load_prompt("./data/prompts/objective/human/response.yaml")
# OBJECTIVE_SUMMARY_THOUGHT = load_prompt("data/prompts/objective/summaries/thought.yaml")
# OBJECTIVE_SUMMARY_RESPONSE = load_prompt("data/prompts/objective/summaries/response.yaml")

Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
29 changes: 29 additions & 0 deletions redis_cache.py → agent/redis_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,3 +123,32 @@ def set(self, key, value):
key_to_remove, _ = self.memory_cache.popitem(last=False)
# Remove the corresponding key-value pair from the Redis cache
self.redis_cache.delete(key_to_remove)

"""
Below is an implementation of a basic LRUcache that utilizes the built
in OrderedDict data structure.
"""
class LRUCache:
    """Fixed-capacity cache that evicts the least recently used entry.

    Backed by a collections.OrderedDict: the front of the dict holds the
    stalest key and the back holds the freshest, so eviction pops from
    the front while reads and writes push keys to the back.
    """

    def __init__(self, capacity):
        self.capacity = capacity   # maximum number of cached entries
        self.cache = OrderedDict() # key -> value, ordered oldest-first

    def get(self, key):
        """Look up `key`; return its value, or None when missing."""
        try:
            # A hit makes this key the newest entry; a miss raises.
            self.cache.move_to_end(key)
        except KeyError:
            return None
        return self.cache[key]

    def put(self, key, value):
        """Store `value` under `key`, evicting the stalest entry if full."""
        if key in self.cache:
            # Existing key: refresh recency, then overwrite below.
            self.cache.move_to_end(key)
        elif len(self.cache) >= self.capacity:
            # New key while full: drop the least recently used entry,
            # which sits at the front of the OrderedDict.
            self.cache.popitem(last=False)
        self.cache[key] = value
23 changes: 0 additions & 23 deletions app.py

This file was deleted.

File renamed without changes
Empty file added bot/__init__.py
Empty file.
38 changes: 21 additions & 17 deletions bot/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,20 +2,25 @@

import discord
import time
import globals
from discord_gateway import (
OBJECTIVE_THOUGHT_CHAIN,
OBJECTIVE_RESPONSE_CHAIN,
CACHE,
THOUGHT_CHANNEL,
)
from discord.ext import commands
from typing import Optional
from chain import chat, ConversationCache
from agent.chain import chat, ConversationCache


class Core(commands.Cog):
def __init__(self, bot) -> None:
self.bot = bot

async def chat_and_save(self, local_chain: ConversationCache, input: str) -> tuple[str, str]:
thought_chain = globals.OBJECTIVE_THOUGHT_CHAIN
response_chain = globals.OBJECTIVE_RESPONSE_CHAIN # if local_chain.conversation_type == "discuss" else globals.WORKSHOP_RESPONSE_CHAIN
# response_chain = local_chain.conversation_type == "discuss" ? globals.DISCUSS_RESPONSE_CHAIN : globals.WORKSHOP_RESPONSE_CHAIN
thought_chain = OBJECTIVE_THOUGHT_CHAIN
response_chain = OBJECTIVE_RESPONSE_CHAIN # if local_chain.conversation_type == "discuss" else WORKSHOP_RESPONSE_CHAIN
# response_chain = local_chain.conversation_type == "discuss" ? DISCUSS_RESPONSE_CHAIN : WORKSHOP_RESPONSE_CHAIN

thought = await chat(
inp=input,
Expand Down Expand Up @@ -61,18 +66,18 @@ async def on_message(self, message):

# if the message came from a DM channel...
if isinstance(message.channel, discord.channel.DMChannel):
LOCAL_CHAIN = globals.CACHE.get(message.channel.id)
LOCAL_CHAIN = CACHE.get(message.channel.id)
if LOCAL_CHAIN is None:
LOCAL_CHAIN = ConversationCache()
globals.CACHE.put(message.channel.id, LOCAL_CHAIN)
CACHE.put(message.channel.id, LOCAL_CHAIN)

i = message.content.replace(str('<@' + str(self.bot.user.id) + '>'), '')

start = time.time()
async with message.channel.typing():
thought, response = await self.chat_and_save(LOCAL_CHAIN, i)

thought_channel = self.bot.get_channel(int(globals.THOUGHT_CHANNEL))
thought_channel = self.bot.get_channel(int(THOUGHT_CHANNEL))
link = f"DM: {message.author.mention}"
n = 1800
if len(thought) > n:
Expand Down Expand Up @@ -100,18 +105,18 @@ async def on_message(self, message):
# if the user mentioned the bot outside of DMs...
if not isinstance(message.channel, discord.channel.DMChannel):
if str(self.bot.user.id) in message.content:
LOCAL_CHAIN = globals.CACHE.get(message.channel.id)
LOCAL_CHAIN = CACHE.get(message.channel.id)
if LOCAL_CHAIN is None:
LOCAL_CHAIN = ConversationCache()
globals.CACHE.put(message.channel.id, LOCAL_CHAIN)
CACHE.put(message.channel.id, LOCAL_CHAIN)

i = message.content.replace(str('<@' + str(self.bot.user.id) + '>'), '')

start = time.time()
async with message.channel.typing():
thought, response = await self.chat_and_save(LOCAL_CHAIN, i)

thought_channel = self.bot.get_channel(int(globals.THOUGHT_CHANNEL))
thought_channel = self.bot.get_channel(int(THOUGHT_CHANNEL))
link = f"https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id}"
n = 1800
if len(thought) > n:
Expand Down Expand Up @@ -139,10 +144,10 @@ async def on_message(self, message):
# if the user replied to the bot outside of DMs...
if not isinstance(message.channel, discord.channel.DMChannel):
if message.reference is not None:
LOCAL_CHAIN = globals.CACHE.get(message.channel.id)
LOCAL_CHAIN = CACHE.get(message.channel.id)
if LOCAL_CHAIN is None:
LOCAL_CHAIN = ConversationCache()
globals.CACHE.put(message.channel.id, LOCAL_CHAIN)
CACHE.put(message.channel.id, LOCAL_CHAIN)
# and if the referenced message is from the bot...
reply_msg = await self.bot.get_channel(message.channel.id).fetch_message(message.reference.message_id)
if reply_msg.author == self.bot.user:
Expand All @@ -156,7 +161,7 @@ async def on_message(self, message):
async with message.channel.typing():
thought, response = await self.chat_and_save(LOCAL_CHAIN, i)

thought_channel = self.bot.get_channel(int(globals.THOUGHT_CHANNEL))
thought_channel = self.bot.get_channel(int(THOUGHT_CHANNEL))
link = f"https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id}"
n = 1800
if len(thought) > n:
Expand Down Expand Up @@ -220,13 +225,12 @@ async def restart(self, ctx: discord.ApplicationContext, respond: Optional[bool]
Args:
ctx: context, necessary for bot commands
"""
LOCAL_CHAIN = globals.CACHE.get(ctx.channel_id)
LOCAL_CHAIN = CACHE.get(ctx.channel_id)
if LOCAL_CHAIN:
LOCAL_CHAIN.restart()
else:
LOCAL_CHAIN = ConversationCache()
globals.CACHE.put(ctx.channel_id, LOCAL_CHAIN )
# globals.restart()
CACHE.put(ctx.channel_id, LOCAL_CHAIN )

if respond:
msg = "Great! The conversation has been restarted. What would you like to talk about?"
Expand Down
23 changes: 20 additions & 3 deletions globals.py → discord_gateway.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
import os
import discord
from dotenv import load_dotenv
from chain import load_chains
from cache import LRUCache

from agent.chain import load_chains
from agent.cache import LRUCache
load_dotenv()
token = os.environ['BOT_TOKEN']

def init():
global OBJECTIVE_THOUGHT_CHAIN, \
Expand All @@ -17,3 +19,18 @@ def init():
OBJECTIVE_THOUGHT_CHAIN,
OBJECTIVE_RESPONSE_CHAIN,
) = load_chains()

init()

intents = discord.Intents.default()
intents.messages = True
intents.message_content = True
intents.members = True

bot = discord.Bot(intents=intents)


bot.load_extension("bot.core")


bot.run(token)

0 comments on commit f24a8fb

Please sign in to comment.