Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cleanup #48

Merged
merged 4 commits into from
Aug 8, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 24 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,18 @@ The repository contains a `Dockerfile` for running the bot in a containerized

```bash
docker build -t tutor-gpt:latest .
docker run --env-file .env tutor-gpt
docker run --env-file .env tutor-gpt
```

The current behaviour will utilize the `.env` file in your local repository and run the bot.
The current behaviour will utilize the `.env` file in your local repository and
run the bot. There are two separate entry points for tutor-gpt: a Discord UI
and a web UI. Below are snippets for manually specifying the execution
environment.

```bash
docker run --env-file .env tutor-gpt python -u -m bot.app # Discord UI
docker run -p 8501:8501 --env-file .env tutor-gpt python -u -m streamlit run www/main.py # Web UI
```

### Architecture

Expand All @@ -47,3 +55,17 @@ following one-liner
source $(poetry env info --path)/bin/activate
```

On some systems this may not detect the proper virtual environment. You can
diagnose this by running `poetry env info` directly to see if the virtualenv
is defined.

If using `pyenv`, remember to set **prefer-active-python** to true, as described in
this section of the [documentation](https://python-poetry.org/docs/managing-environments/).

If the above setting does not work, another workaround is to use `poetry shell`
directly or to wrap the source command as shown below:

```bash
poetry run source $(poetry env info --path)/bin/activate
```

82 changes: 9 additions & 73 deletions agent/chain.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import os
import validators

from langchain.chat_models import ChatOpenAI
from langchain.memory import ChatMessageHistory
Expand All @@ -18,18 +17,17 @@
class ConversationCache:
"Wrapper Class for storing contexts between channels. Using an object to pass by reference avoids additional cache hits"
def __init__(self):
# self.thought_memory: ChatMessageHistory
# self.response_memory: ChatMessageHistory
self.thought_memory: ChatMessageHistory = ChatMessageHistory()
self.response_memory: ChatMessageHistory = ChatMessageHistory()


def restart(self):
def restart(self) -> None:
self.thought_memory.clear()
self.response_memory.clear()

class BloomChain:
def __init__(self, llm: ChatOpenAI, verbose: bool = False):
"Wrapper class for encapsulating the multiple different chains used in reasoning for the tutor's thoughts"
def __init__(self, llm: ChatOpenAI, verbose: bool = False) -> None:
self.llm = llm
self.verbose = verbose

Expand All @@ -38,7 +36,7 @@ def __init__(self, llm: ChatOpenAI, verbose: bool = False):
self.system_response = SystemMessagePromptTemplate(prompt=SYSTEM_RESPONSE)


async def think(self, thought_memory: ChatMessageHistory, input: str):
async def think(self, thought_memory: ChatMessageHistory, input: str) -> str:
"""Generate Bloom's thought on the user."""

# load message history
Expand All @@ -55,12 +53,12 @@ async def think(self, thought_memory: ChatMessageHistory, input: str):

# update chat memory
thought_memory.add_message(HumanMessage(content=input))
thought_memory.add_message(thought_message)
thought_memory.add_message(thought_message) # apredict_messages returns AIMessage so can add directly

return thought_message.content


async def respond(self, response_memory: ChatMessageHistory, thought: str, input: str):
async def respond(self, response_memory: ChatMessageHistory, thought: str, input: str) -> str:
"""Generate Bloom's response to the user."""

# load message history
Expand All @@ -77,78 +75,16 @@ async def respond(self, response_memory: ChatMessageHistory, thought: str, input

# update chat memory
response_memory.add_message(HumanMessage(content=input))
response_memory.add_message(response_message)
response_memory.add_message(response_message) # apredict_messages returns AIMessage so can add directly

return response_message.content

async def chat(self, cache: ConversationCache, inp: str ) -> tuple[str, str]:
thought = await self.think(cache.thought_memory, inp)
response = await self.respond(cache.response_memory, thought, inp)
return thought, response
#async def chat(self, **kwargs):
# if we sent a thought across, generate a response
# if kwargs.get('thought'):
# assert kwargs.get('response_chain'), "Please pass the response chain."
# response_chain: BloomChain = kwargs.get('response_chain')
# response_memory: ChatMessageHistory = kwargs.get('response_memory')
# inp = kwargs.get('inp')
# thought = kwargs.get('thought')

# # get the history into a string
# # history = response_memory.load_memory_variables({})['history']

# # response = response_chain.apredict(
# # input=inp,
# # thought=thought,
# # history=history
# # )

# response = response_chain.respond(response_memory, thought, inp)

# if 'Student:' in response:
# response = response.split('Student:')[0].strip()
# if 'Studen:' in response:
# response = response.split('Studen:')[0].strip()

# return response

# # otherwise, we're generating a thought
# else:
# assert kwargs.get('thought_chain'), "Please pass the thought chain."
# inp = kwargs.get('inp')
# thought_chain: BloomChain = kwargs.get('thought_chain')
# thought_memory: ChatMessageHistory = kwargs.get('thought_memory')

# # get the history into a string
# # history = thought_memory.load_memory_variables({})['history']
#
# # response = await thought_chain.apredict(
# # input=inp,
# # history=history
# # )

# response = thought_chain.think(thought_memory, inp)

# if 'Tutor:' in response:
# response = response.split('Tutor:')[0].strip()

# return response


# def load_memories():
# """Load the memory objects"""
# thought_memory: ChatMessageHistory
# response_memory: ChatMessageHistory
#
# # memory definitions
# thought_memory = ChatMessageHistory()
# response_memory = ChatMessageHistory()
#
#
# return (thought_memory, response_memory)


def load_chains():

def load_chains() -> BloomChain:
"""Logic for loading the chain you want to use should go here."""
llm = ChatOpenAI(model_name = "gpt-4", temperature=1.2)

Expand Down
144 changes: 32 additions & 112 deletions bot/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,24 +16,6 @@ class Core(commands.Cog):
def __init__(self, bot) -> None:
self.bot = bot

# async def chat_and_save(self, local_chain: ConversationCache, input: str) -> tuple[str, str]:
# bloom_chain = BLOOM_CHAIN # if local_chain.conversation_type == "discuss" else WORKSHOP_RESPONSE_CHAIN
# # response_chain = local_chain.conversation_type == "discuss" ? DISCUSS_RESPONSE_CHAIN : WORKSHOP_RESPONSE_CHAIN
#
# thought = await chat(
# inp=input,
# thought_chain=bloom_chain,
# thought_memory=local_chain.thought_memory
# )
# response = await chat(
# inp=input,
# thought=thought,
# response_chain=bloom_chain,
# response_memory=local_chain.response_memory
# )
#
# return thought, response

@commands.Cog.listener()
async def on_member_join(self, member):
welcome_message = """
Expand All @@ -58,32 +40,39 @@ async def on_ready(self):

@commands.Cog.listener()
async def on_message(self, message):
# Don't let the bot reply to itself
if message.author == self.bot.user:
return

# if the message came from a DM channel...
if isinstance(message.channel, discord.channel.DMChannel):
LOCAL_CHAIN = CACHE.get(message.channel.id)
if LOCAL_CHAIN is None:
LOCAL_CHAIN = ConversationCache()
CACHE.put(message.channel.id, LOCAL_CHAIN)
# Get cache for conversation
LOCAL_CHAIN = CACHE.get(message.channel.id)
if LOCAL_CHAIN is None:
LOCAL_CHAIN = ConversationCache()
CACHE.put(message.channel.id, LOCAL_CHAIN)

i = message.content.replace(str('<@' + str(self.bot.user.id) + '>'), '')

# Get the message content but remove any mentions
inp = message.content.replace(str('<@' + str(self.bot.user.id) + '>'), '')
n = 1800

async def reply(forward_thought = True):
"Generate response to user"
start = time.time()
async with message.channel.typing():
# thought, response = await self.chat_and_save(LOCAL_CHAIN, i)
thought, response = await BLOOM_CHAIN.chat(LOCAL_CHAIN, i)
thought, response = await BLOOM_CHAIN.chat(LOCAL_CHAIN, inp)

thought_channel = self.bot.get_channel(int(THOUGHT_CHANNEL))
link = f"DM: {message.author.mention}"
n = 1800
if len(thought) > n:
chunks = [thought[i:i+n] for i in range(0, len(thought), n)]
for i in range(chunks):
await thought_channel.send(f"{link}\n```\nThought #{i}: {chunks[i]}\n```")
else:
await thought_channel.send(f"{link}\n```\nThought: {thought}\n```")

# Thought Forwarding
if (forward_thought):
link = f"https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id}"
if len(thought) > n:
chunks = [thought[i:i+n] for i in range(0, len(thought), n)]
for i in range(len(chunks)):
await thought_channel.send(f"{link}\n```\nThought #{i}: {chunks[i]}\n```")
else:
await thought_channel.send(f"{link}\n```\nThought: {thought}\n```")

# Response Forwarding
if len(response) > n:
chunks = [response[i:i+n] for i in range(0, len(response), n)]
for chunk in chunks:
Expand All @@ -93,98 +82,29 @@ async def on_message(self, message):

end = time.time()
print(f"DM: {message.author.mention}")
print(f"Input: {i}")
print(f"Input: {inp}")
print(f"Thought: {thought}")
print(f"Response: {response}")
print(f"Elapsed: {end - start}")
print("=========================================")
# if the message came from a DM channel...
if isinstance(message.channel, discord.channel.DMChannel):
await reply(forward_thought=False)


# if the user mentioned the bot outside of DMs...
if not isinstance(message.channel, discord.channel.DMChannel):
if str(self.bot.user.id) in message.content:
LOCAL_CHAIN = CACHE.get(message.channel.id)
if LOCAL_CHAIN is None:
LOCAL_CHAIN = ConversationCache()
CACHE.put(message.channel.id, LOCAL_CHAIN)

i = message.content.replace(str('<@' + str(self.bot.user.id) + '>'), '')

start = time.time()
async with message.channel.typing():
# thought, response = await self.chat_and_save(LOCAL_CHAIN, i)
thought, response = await BLOOM_CHAIN.chat(LOCAL_CHAIN, i)
await reply(forward_thought=True)

thought_channel = self.bot.get_channel(int(THOUGHT_CHANNEL))
link = f"https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id}"
n = 1800
if len(thought) > n:
chunks = [thought[i:i+n] for i in range(0, len(thought), n)]
for i in range(chunks):
await thought_channel.send(f"{link}\n```\nThought #{i}: {chunks[i]}\n```")
else:
await thought_channel.send(f"{link}\n```\nThought: {thought}\n```")

if len(response) > n:
chunks = [response[i:i+n] for i in range(0, len(response), n)]
for chunk in chunks:
await message.reply(chunk)
else:
await message.reply(response)

end = time.time()
print(f"Link: {link}")
print(f"Input: {i}")
print(f"Thought: {thought}")
print(f"Response: {response}")
print(f"Elapsed: {end - start}")
print("=========================================")

# if the user replied to the bot outside of DMs...
if not isinstance(message.channel, discord.channel.DMChannel):
if message.reference is not None:
LOCAL_CHAIN = CACHE.get(message.channel.id)
if LOCAL_CHAIN is None:
LOCAL_CHAIN = ConversationCache()
CACHE.put(message.channel.id, LOCAL_CHAIN)
# and if the referenced message is from the bot...
reply_msg = await self.bot.get_channel(message.channel.id).fetch_message(message.reference.message_id)
if reply_msg.author == self.bot.user:
i = message.content.replace(str('<@' + str(self.bot.user.id) + '>'), '')
# check that the reply isn't to one of the bot's thought messages
if reply_msg.content.startswith("https://discord.com"):
return
if message.content.startswith("!no") or message.content.startswith("!No"):
return
start = time.time()
async with message.channel.typing():
# thought, response = await self.chat_and_save(LOCAL_CHAIN, i)
thought, response = await BLOOM_CHAIN.chat(LOCAL_CHAIN, i)

thought_channel = self.bot.get_channel(int(THOUGHT_CHANNEL))
link = f"https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id}"
n = 1800
if len(thought) > n:
chunks = [thought[i:i+n] for i in range(0, len(thought), n)]
for i in range(chunks):
await thought_channel.send(f"{link}\n```\nThought #{i}: {chunks[i]}\n```")
else:
await thought_channel.send(f"{link}\n```\nThought: {thought}\n```")

if len(response) > n:
chunks = [response[i:i+n] for i in range(0, len(response), n)]
for chunk in chunks:
await message.reply(chunk)
else:
await message.reply(response)

end = time.time()
print(f"Link: {link}")
print(f"Input: {i}")
print(f"Thought: {thought}")
print(f"Response: {response}")
print(f"Elapsed: {end - start}")
print("=========================================")
await reply(forward_thought=True)


@commands.slash_command(description="Help using the bot")
async def help(self, ctx: discord.ApplicationContext):
Expand Down
Loading