Skip to content

Commit

Permalink
Remove v2 backend components (#1168)
Browse files Browse the repository at this point in the history
* remove v2 llm memory, implement ReplyStream

* remove v2 websockets & REST handlers

* remove unused v2 data models

* fix slash command autocomplete

* fix unit tests

* remove unused _learned context provider

* fix mypy

* pre-commit

* fix optional k arg in YChatHistory

* bump jupyter chat to 0.7.1 to fix Python 3.9 tests

* revert accidentally breaking /learn
  • Loading branch information
dlqqq authored Dec 25, 2024
1 parent 34f4255 commit aad24e1
Show file tree
Hide file tree
Showing 24 changed files with 331 additions and 1,490 deletions.
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType
from jupyter_ai.models import HumanChatMessage
from jupyterlab_chat.ychat import YChat
from jupyterlab_chat.models import Message


class TestSlashCommand(BaseChatHandler):
Expand All @@ -26,5 +25,5 @@ class TestSlashCommand(BaseChatHandler):
def __init__(self, *args, **kwargs):
    # Pure pass-through constructor: all setup is deferred to BaseChatHandler.
    super().__init__(*args, **kwargs)

async def process_message(self, message: HumanChatMessage, chat: YChat):
async def process_message(self, message: Message):
    """Handle any invocation of the `/test` slash command with a fixed reply."""
    reply_text = "This is the `/test` slash command."
    self.reply(reply_text)
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType
from jupyter_ai.models import HumanChatMessage
from jupyterlab_chat.models import Message


class TestSlashCommand(BaseChatHandler):
Expand All @@ -25,5 +25,5 @@ class TestSlashCommand(BaseChatHandler):
def __init__(self, *args, **kwargs):
    # Pure pass-through constructor: all setup is deferred to BaseChatHandler.
    super().__init__(*args, **kwargs)

async def process_message(self, message: HumanChatMessage):
async def process_message(self, message: Message):
    """Handle any invocation of the `/test` slash command with a fixed reply."""
    reply_text = "This is the `/test` slash command."
    self.reply(reply_text)
29 changes: 16 additions & 13 deletions packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import argparse
from typing import Dict, Type

from jupyter_ai.models import HumanChatMessage
from jupyter_ai_magics.providers import BaseProvider
from jupyterlab_chat.models import Message
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.prompts import PromptTemplate
Expand Down Expand Up @@ -59,7 +59,7 @@ def create_llm_chain(
verbose=False,
)

async def process_message(self, message: HumanChatMessage):
async def process_message(self, message: Message):
args = self.parse_args(message)
if args is None:
return
Expand All @@ -70,21 +70,24 @@ async def process_message(self, message: HumanChatMessage):

self.get_llm_chain()

try:
with self.pending("Searching learned documents", message):
with self.start_reply_stream() as reply_stream:
try:
assert self.llm_chain
# TODO: migrate this class to use a LCEL `Runnable` instead of
# `Chain`, then remove the below ignore comment.
result = await self.llm_chain.acall( # type:ignore[attr-defined]
{"question": query}
)
response = result["answer"]
self.reply(response, message)
except AssertionError as e:
self.log.error(e)
response = """Sorry, an error occurred while reading the from the learned documents.
If you have changed the embedding provider, try deleting the existing index by running
`/learn -d` command and then re-submitting the `learn <directory>` to learn the documents,
and then asking the question again.
"""
self.reply(response, message)

# old pending message: "Searching learned documents..."
# TODO: configure this pending message in jupyterlab-chat
reply_stream.write(response)
except AssertionError as e:
self.log.error(e)
response = """Sorry, an error occurred while reading the from the learned documents.
If you have changed the embedding provider, try deleting the existing index by running
`/learn -d` command and then re-submitting the `learn <directory>` to learn the documents,
and then asking the question again.
"""
reply_stream.write(response, message)
Loading

0 comments on commit aad24e1

Please sign in to comment.