Remove v2 backend components #1168

Merged · 11 commits · Dec 25, 2024
@@ -1,6 +1,5 @@
 from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType
-from jupyter_ai.models import HumanChatMessage
-from jupyterlab_chat.ychat import YChat
+from jupyterlab_chat.models import Message


 class TestSlashCommand(BaseChatHandler):
@@ -26,5 +25,5 @@ class TestSlashCommand(BaseChatHandler):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

-    async def process_message(self, message: HumanChatMessage, chat: YChat):
+    async def process_message(self, message: Message):
         self.reply("This is the `/test` slash command.")
@@ -1,5 +1,5 @@
 from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType
-from jupyter_ai.models import HumanChatMessage
+from jupyterlab_chat.models import Message


 class TestSlashCommand(BaseChatHandler):
@@ -25,5 +25,5 @@ class TestSlashCommand(BaseChatHandler):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)

-    async def process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: Message):
         self.reply("This is the `/test` slash command.")
packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py (29 changes: 16 additions & 13 deletions)
@@ -1,8 +1,8 @@
 import argparse
 from typing import Dict, Type

-from jupyter_ai.models import HumanChatMessage
 from jupyter_ai_magics.providers import BaseProvider
+from jupyterlab_chat.models import Message
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferWindowMemory
 from langchain_core.prompts import PromptTemplate
@@ -59,7 +59,7 @@ def create_llm_chain(
             verbose=False,
         )

-    async def process_message(self, message: HumanChatMessage):
+    async def process_message(self, message: Message):
         args = self.parse_args(message)
         if args is None:
             return
@@ -70,21 +70,24 @@ async def process_message(self, message: HumanChatMessage):

         self.get_llm_chain()

-        try:
-            with self.pending("Searching learned documents", message):
+        with self.start_reply_stream() as reply_stream:
+            try:
                 assert self.llm_chain
                 # TODO: migrate this class to use a LCEL `Runnable` instead of
                 # `Chain`, then remove the below ignore comment.
                 result = await self.llm_chain.acall(  # type:ignore[attr-defined]
                     {"question": query}
                 )
                 response = result["answer"]
-            self.reply(response, message)
-        except AssertionError as e:
-            self.log.error(e)
-            response = """Sorry, an error occurred while reading the from the learned documents.
-            If you have changed the embedding provider, try deleting the existing index by running
-            `/learn -d` command and then re-submitting the `learn <directory>` to learn the documents,
-            and then asking the question again.
-            """
-            self.reply(response, message)
+
+                # old pending message: "Searching learned documents..."
+                # TODO: configure this pending message in jupyterlab-chat
+                reply_stream.write(response)
+            except AssertionError as e:
+                self.log.error(e)
+                response = """Sorry, an error occurred while reading the from the learned documents.
+                If you have changed the embedding provider, try deleting the existing index by running
+                `/learn -d` command and then re-submitting the `learn <directory>` to learn the documents,
+                and then asking the question again.
+                """
+                reply_stream.write(response, message)
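Beyond the type change, this hunk replaces the self.pending(...) spinner plus terminal self.reply(...) pair with a single self.start_reply_stream() context manager that is written to on both the success and error paths. A condensed sketch of the new control flow, assuming start_reply_stream() and reply_stream.write() behave as the changed lines suggest; _answer() is a hypothetical stand-in for the ConversationalRetrievalChain call:

from jupyter_ai.chat_handlers.base import BaseChatHandler
from jupyterlab_chat.models import Message


class AskChatHandler(BaseChatHandler):
    async def process_message(self, message: Message):
        # One reply stream per message: entering the context replaces the
        # old pending() spinner, and write() replaces reply().
        with self.start_reply_stream() as reply_stream:
            try:
                response = await self._answer(message)  # hypothetical helper
                reply_stream.write(response)
            except AssertionError as e:
                self.log.error(e)
                # The diff also passes `message` to write() on this path;
                # whether that second argument is required is not clear
                # from the hunk alone.
                reply_stream.write("Sorry, an error occurred ...")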