Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Stream chat #10

Merged
merged 3 commits into from
May 18, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from render_body import render_body
from render_my_conversations import render_my_conversations
import streamlit as st
from firestore_utils import clear_user_history

from dotenv import load_dotenv

Expand All @@ -29,6 +30,7 @@ def load_and_store_conversation(st, cid: str):
def controller():
# TODO: display useful total cost in $
st.session_state["total_cost"] = 0.0
st.session_state["conversation_expanded"] = True

# set model in session if specified in params
model_from_param = get_key_from_params(st, "model")
Expand Down Expand Up @@ -88,9 +90,36 @@ def render_history_menu(sidebar):
render_my_conversations(st, sidebar)


def render_profile(sidebar):
    """Render the signed-in user's profile expander in the sidebar.

    Shows the account email and avatar, a sign-out button, and a
    "Clear History" control that wipes the user's stored conversations.
    Does nothing when nobody is signed in.
    """
    user_info = st.session_state.get("user_info")
    if not user_info:
        return

    with sidebar.expander(f"Signed in as {user_info.get('email')}"):
        st.image(user_info.get("picture"), width=50)
        if st.button("Sign out", key="button_signout", type="primary"):
            # Drop all session state (auth included) and restart the script run.
            st.session_state.clear()
            st.experimental_rerun()
        st.write(
            "While it's useful to resume past conversations, sometimes you may want to clear your chat history."
        )
        placeholder = st.empty()
        with placeholder:
            clicked = st.button(
                "Clear History", key="button_clear_history", type="primary"
            )
        if clicked:
            clear_user_history(user_info["id"])
            # Replace the button with a confirmation message.
            placeholder.info("Chat history cleared", icon="✅")
            st.snow()


def render_sidebar(sidebar):
    """Assemble the sidebar: new-chat control, auth, profile, divider, history."""
    render_new_chat(sidebar)
    render_auth(st)
    render_profile(sidebar)
    sidebar.divider()
    render_history_menu(sidebar)

Expand Down
9 changes: 4 additions & 5 deletions content/overview.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,11 @@ Thanks for trying this out. It's super alpha, but each person can log in via Goo

Limitations:

- you have to log in again if you reload the page (need to work around streamlit later)
- openai api gives rate limit errors sometimes, causing crashes
- gpt4 feels slower than ai.com naturally
- can't delete personal conversations yet. don't put anything sensitive there.
- **you have to log in again if you reload** the page (streamlit's limitation; work around later)
- openai api response can be slow, with rate limit errors sometimes (chat.openai.com has advantage)
- can only delete all chat history, not individual conversations yet (contributions welcome!)

How to help:

- report bugs to Hien or me.
- run your code locally https://github.com/CoderPush/chatlit by getting your own keys at .env
- run your code locally https://github.com/CoderPush/chatlit by asking me for .env file
54 changes: 16 additions & 38 deletions render_body.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,10 @@
from render_conversation import render_conversation
from render_chat_form import render_chat_form
from render_chat_form import render_chat_form, render_chat_stream
from firestore_utils import get_firestore_db
from utils import get_cid_from_params
from firestore_utils import clear_user_history
from utils import get_cid_from_params, get_expander_text


def load_conversation(st):
def load_conversation_from_db(st):
cid = get_cid_from_params(st)
if cid:
db = get_firestore_db()
Expand All @@ -14,45 +13,24 @@ def load_conversation(st):
return {}


def get_expander_text(st):
    """Build the markdown heading for the chat expander.

    Shows the current model, plus the signed-in user's name when one is
    available in session state.
    """
    model = st.session_state.get("model")
    user = st.session_state.get("user_info", {}).get("name", None)
    suffix = f" with {user}" if user else ""
    return f"### {model}{suffix}"
def load_conversation_from_session_state(st):
    """Fetch the current conversation dict from Streamlit session state.

    Falls back to an empty dict when no conversation has been stored yet.
    """
    default: dict = {}
    return st.session_state.get("conversation", default)


def render_body(st):
with st.expander(get_expander_text(st)):
user_info = st.session_state.get("user_info")
if user_info:
st.write(f"Signed in as {user_info.get('email')}")
st.image(user_info.get("picture"), width=50)
signout = st.button("Sign out", key="button_signout", type="primary")
if signout:
st.session_state.clear()
st.experimental_rerun()
st.write(
"While it's useful to resume past conversations, sometimes you may want to clear your chat history."
)
placeholder = st.empty()
with placeholder:
clear_history = st.button(
"Clear History", key="button_clear_history", type="primary"
)
if clear_history:
clear_user_history(user_info["id"])
placeholder.info("Chat history cleared", icon="✅")
st.snow()

conversation = load_conversation(st)
if conversation:
render_conversation(st, conversation)
messages_holder = st.expander(
get_expander_text(st), expanded=st.session_state["conversation_expanded"]
)
with messages_holder:
# load_conversation from session_state
conversation = load_conversation_from_session_state(st)
if conversation:
render_conversation(st, conversation)

if st.session_state.get("user_info"):
render_chat_form(st)
with st.container():
# render_chat_form(st)
render_chat_stream(st)
else:
# load homepage.md into a string
with open("content/overview.md", "r") as f:
Expand Down
99 changes: 94 additions & 5 deletions render_chat_form.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,21 @@
import openai
from firestore_utils import firestore_save
from utils import generate_conversation_title, get_oauth_uid
import time


def load_messages(st):
def extract_messages(st):
default = [
{
"role": "system",
"content": "You are a helpful assistant. Please use concise language to save bandwidth and token usage. Avoid 'AI language model' disclaimer whenever possible.",
}
]

conversation = st.session_state.get("conversation", {})
default = [{"role": "system", "content": "You are a helpful assistant."}]
messages = conversation.get("messages", default)

return conversation.get("messages", default)
return messages


def get_content(st, response):
Expand All @@ -23,7 +31,7 @@ def get_content(st, response):

def generate_response(st, prompt):
model = st.session_state["model"]
messages = load_messages(st)
messages = extract_messages(st)
messages.append({"role": "user", "content": prompt})

print("openai.ChatCompletion.create with")
Expand All @@ -43,7 +51,7 @@ def generate_response(st, prompt):
return messages, usage


def save_to_firestore(st, messages, usage):
def save_to_firestore(st, messages, usage=None):
model = st.session_state["model"]
if len(messages) > 0:
conversation = st.session_state.get("conversation", {})
Expand All @@ -69,6 +77,7 @@ def save_to_firestore(st, messages, usage):
return new_conversation


# non-streaming version, with usage
def render_chat_form(st):
name = st.session_state.get("user_info", {}).get("name", "You")
model = st.session_state["model"]
Expand All @@ -85,3 +94,83 @@ def render_chat_form(st):
if new_conversation is not None:
st.experimental_set_query_params(cid=new_conversation.id)
st.experimental_rerun()


# see sample-stream.json to know how to parse it
def generate_stream(st, holder, user_input):
model = st.session_state["model"]
messages = extract_messages(st)
messages.append({"role": "user", "content": user_input})

print("openai.ChatCompletion.create with", model, messages)
completion = openai.ChatCompletion.create(
model=model,
messages=messages,
stream=True,
)

# first chunk should be
# {
# "choices": [
# {
# "delta": {
# "role": "assistant"
# },
# "finish_reason": null,
# "index": 0
# }
# ],
# "created": 1684389483,
# "id": "chatcmpl-7HQwF5QPvTrDtYPOvBZbzFfDb9tcI",
# "model": "gpt-3.5-turbo-0301",
# "object": "chat.completion.chunk"
# }

# middle chunks are content:
with holder.container():
content = ""
for chunk in completion:
delta = chunk["choices"][0]["delta"]
if "content" in delta:
content += delta["content"]
holder.markdown(content)

# last chunk should be
# {
# "choices": [
# {
# "delta": {},
# "finish_reason": "stop",
# "index": 0
# }
# ],
# "created": 1684389483,
# "id": "chatcmpl-7HQwF5QPvTrDtYPOvBZbzFfDb9tcI",
# "model": "gpt-3.5-turbo-0301",
# "object": "chat.completion.chunk"
# }

messages.append({"role": "assistant", "content": content})

# No usage info in stream mode yet
# https://community.openai.com/t/usage-info-in-api-responses/18862

return messages


def render_chat_stream(st):
    """Render the chat input form (streaming variant).

    On submit: collapses the conversation expander so the streaming reply
    stays in view, streams the assistant's response, persists the updated
    conversation to Firestore, and — when a new conversation document was
    created — reruns with its id in the query params.
    """
    with st.form(key="chat_prompt", clear_on_submit=True):
        stream_holder = st.empty()
        # Plain string, not an f-string: the label has no placeholders.
        user_input = st.text_area(
            "You:", key="text_area_stream", label_visibility="collapsed"
        )
        submit_button = st.form_submit_button(label="Send")

    if submit_button and user_input:
        st.session_state["conversation_expanded"] = False
        messages = generate_stream(st, stream_holder, user_input)
        new_conversation = save_to_firestore(st, messages)
        if new_conversation is not None:
            st.experimental_set_query_params(cid=new_conversation.id)
            st.experimental_rerun()
1 change: 0 additions & 1 deletion render_conversation.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,5 +27,4 @@ def render_messages(st, messages):

def render_conversation(st, conversation):
    """Render every message of a stored conversation."""
    render_messages(st, conversation["messages"])
Loading