Support AzureOpenAI LLMs & fix research agent defaulting to OpenAI #94

Merged · 3 commits · Oct 25, 2024
motleycrew/applications/research_agent/question_prioritizer.py
@@ -1,7 +1,10 @@
+from typing import Optional
+
 from langchain.prompts import PromptTemplate
 from langchain_core.prompts.base import BasePromptTemplate
 from langchain_core.runnables import RunnableLambda, RunnablePassthrough, chain
 from langchain_core.tools import StructuredTool
+from langchain_core.language_models import BaseLanguageModel
 from pydantic import BaseModel, Field

 from motleycrew.applications.research_agent.question import Question
@@ -16,8 +19,9 @@ class QuestionPrioritizerTool(MotleyTool):
     def __init__(
         self,
         prompt: str | BasePromptTemplate = None,
+        llm: Optional[BaseLanguageModel] = None,
     ):
-        langchain_tool = create_question_prioritizer_langchain_tool(prompt=prompt)
+        langchain_tool = create_question_prioritizer_langchain_tool(prompt=prompt, llm=llm)

         super().__init__(langchain_tool)

@@ -47,6 +51,7 @@ class QuestionPrioritizerInput(BaseModel, arbitrary_types_allowed=True):

 def create_question_prioritizer_langchain_tool(
     prompt: str | BasePromptTemplate = None,
+    llm: Optional[BaseLanguageModel] = None,
 ) -> StructuredTool:
     if prompt is None:
         prompt = _default_prompt
@@ -56,6 +61,7 @@ def create_question_prioritizer_langchain_tool(
         name="Question prioritizer",
         description="Takes the original question and a list of derived questions, "
         "and selects from the latter the one most pertinent to the former",
+        llm=llm,
     )

 @chain
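With the llm parameter threaded through, callers can pin the prioritizer to a specific model instead of letting it silently fall back to the default OpenAI LLM, which is the "defaulting to OpenAI" half of this PR's title. A minimal sketch, assuming an Azure chat model from langchain_openai and the import path implied by the file layout above; the deployment name is hypothetical:

    from langchain_openai import AzureChatOpenAI

    from motleycrew.applications.research_agent.question_prioritizer import (
        QuestionPrioritizerTool,
    )

    # Endpoint, key, and API version come from the standard
    # AZURE_OPENAI_ENDPOINT / AZURE_OPENAI_API_KEY / OPENAI_API_VERSION env vars.
    llm = AzureChatOpenAI(azure_deployment="my-gpt4o-deployment", temperature=0)

    # The tool now passes this llm through to the underlying Langchain tool.
    prioritizer = QuestionPrioritizerTool(llm=llm)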
2 changes: 1 addition & 1 deletion motleycrew/applications/research_agent/question_task.py
@@ -41,7 +41,7 @@ def __init__(
         self.n_iter = 0
         self.question = Question(question=question)
         self.graph_store.insert_node(self.question)
-        self.question_prioritization_tool = QuestionPrioritizerTool()
+        self.question_prioritization_tool = QuestionPrioritizerTool(llm=llm)
         self.question_generation_tool = QuestionGeneratorTool(
             query_tool=query_tool, graph=self.graph_store, llm=llm
         )
3 changes: 2 additions & 1 deletion motleycrew/common/enums.py
@@ -8,8 +8,9 @@ class LLMProvider:
     TOGETHER = "together"
     GROQ = "groq"
     OLLAMA = "ollama"
+    AZURE_OPENAI = "azure_openai"

-    ALL = {OPENAI, ANTHROPIC, REPLICATE, TOGETHER, GROQ, OLLAMA}
+    ALL = {OPENAI, ANTHROPIC, REPLICATE, TOGETHER, GROQ, OLLAMA, AZURE_OPENAI}


 class LLMFramework:
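A quick illustrative check of the new constant; both assertions follow directly from this diff:

    from motleycrew.common import LLMProvider

    # "azure_openai" is now a recognized provider key.
    assert LLMProvider.AZURE_OPENAI == "azure_openai"
    assert LLMProvider.AZURE_OPENAI in LLMProvider.ALL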
46 changes: 44 additions & 2 deletions motleycrew/common/llms.py
@@ -1,7 +1,6 @@
 """Helper functions to initialize Language Models (LLMs) from different frameworks."""

-from motleycrew.common import Defaults
-from motleycrew.common import LLMProvider, LLMFramework
+from motleycrew.common import Defaults, LLMFramework, LLMProvider
 from motleycrew.common.exceptions import LLMProviderNotSupported
 from motleycrew.common.utils import ensure_module_is_installed
@@ -209,6 +208,47 @@ def llama_index_ollama_llm(
     return Ollama(model=llm_name, temperature=llm_temperature, **kwargs)


+def langchain_azure_openai_llm(
+    llm_name: str = Defaults.DEFAULT_LLM_NAME,
+    llm_temperature: float = Defaults.DEFAULT_LLM_TEMPERATURE,
+    **kwargs,
+):
+    """Initialize an Azure OpenAI LLM client for use with Langchain.
+
+    Args:
+        llm_name: Name of the LLM in Azure OpenAI API.
+        llm_temperature: Temperature for the LLM.
+    """
+    from langchain_openai import AzureChatOpenAI
+
+    return AzureChatOpenAI(model=llm_name, temperature=llm_temperature, **kwargs)
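AzureChatOpenAI picks up its connection settings from the environment when they are not passed explicitly, so once the standard Azure variables are set, a call like the following should suffice. A sketch; the endpoint, key, API version, and deployment name are all placeholders:

    import os

    from motleycrew.common.llms import langchain_azure_openai_llm

    # Assumed environment, mirroring AzureChatOpenAI's documented defaults.
    os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://my-resource.openai.azure.com/")
    os.environ.setdefault("AZURE_OPENAI_API_KEY", "<your-key>")
    os.environ.setdefault("OPENAI_API_VERSION", "2024-02-01")

    # Extra kwargs such as azure_deployment are forwarded to AzureChatOpenAI.
    llm = langchain_azure_openai_llm(
        llm_name="gpt-4o",
        llm_temperature=0.0,
        azure_deployment="my-gpt4o-deployment",  # hypothetical deployment name
    )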


+def llama_index_azure_openai_llm(
+    llm_name: str = Defaults.DEFAULT_LLM_NAME,
+    llm_temperature: float = Defaults.DEFAULT_LLM_TEMPERATURE,
+    **kwargs,
+):
+    """Initialize an Azure OpenAI LLM client for use with LlamaIndex.
+
+    Args:
+        llm_name: Name of the LLM in Azure OpenAI API.
+        llm_temperature: Temperature for the LLM.
+    """
+    ensure_module_is_installed("llama_index")
+    from llama_index.llms.azure_openai import AzureOpenAI
+
+    if "azure_deployment" in kwargs:
+        kwargs["engine"] = kwargs.pop("azure_deployment")
+
+    if "engine" not in kwargs:
+        raise ValueError(
+            "For using Azure OpenAI with LlamaIndex, you must specify an engine/deployment name."
+        )
+
+    return AzureOpenAI(model=llm_name, temperature=llm_temperature, **kwargs)
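LlamaIndex's AzureOpenAI client expects the deployment name under engine rather than azure_deployment, which is why the helper remaps the kwarg before validating it. An illustrative call, with a hypothetical deployment name:

    from motleycrew.common.llms import llama_index_azure_openai_llm

    # azure_deployment is remapped to engine= before constructing the client.
    llm = llama_index_azure_openai_llm(
        llm_name="gpt-4o",
        llm_temperature=0.0,
        azure_deployment="my-gpt4o-deployment",
    )

    # Omitting both azure_deployment and engine raises the ValueError above.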


 LLM_MAP = {
     (LLMFramework.LANGCHAIN, LLMProvider.OPENAI): langchain_openai_llm,
     (LLMFramework.LLAMA_INDEX, LLMProvider.OPENAI): llama_index_openai_llm,
@@ -222,6 +262,8 @@ def llama_index_ollama_llm(
     (LLMFramework.LLAMA_INDEX, LLMProvider.GROQ): llama_index_groq_llm,
     (LLMFramework.LANGCHAIN, LLMProvider.OLLAMA): langchain_ollama_llm,
     (LLMFramework.LLAMA_INDEX, LLMProvider.OLLAMA): llama_index_ollama_llm,
+    (LLMFramework.LANGCHAIN, LLMProvider.AZURE_OPENAI): langchain_azure_openai_llm,
+    (LLMFramework.LLAMA_INDEX, LLMProvider.AZURE_OPENAI): llama_index_azure_openai_llm,
 }
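The registry is keyed by (framework, provider) pairs, so callers resolve the right factory with a single lookup. A minimal dispatch sketch using only what LLM_MAP itself guarantees; the deployment name is hypothetical:

    from motleycrew.common import LLMFramework, LLMProvider
    from motleycrew.common.llms import LLM_MAP

    # Resolve the factory for (framework, provider) and call it; any extra
    # kwargs are passed through to the underlying framework client.
    factory = LLM_MAP[(LLMFramework.LANGCHAIN, LLMProvider.AZURE_OPENAI)]
    llm = factory(llm_name="gpt-4o", azure_deployment="my-gpt4o-deployment")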
6 changes: 4 additions & 2 deletions motleycrew/tools/simple_retriever_tool.py
@@ -65,14 +65,16 @@ def make_retriever_langchain_tool(
         # load the documents and create the index
         documents = SimpleDirectoryReader(data_dir).load_data()
         index = VectorStoreIndex.from_documents(
-            documents, transformations=[SentenceSplitter(chunk_size=512), embeddings]
+            documents,
+            transformations=[SentenceSplitter(chunk_size=512), embeddings],
+            embed_model=embeddings,
         )
         # store it for later
         index.storage_context.persist(persist_dir=persist_dir)
     else:
         # load the existing index
         storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
-        index = load_index_from_storage(storage_context)
+        index = load_index_from_storage(storage_context, embed_model=embeddings)

     retriever = index.as_retriever(
         similarity_top_k=10,
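This hunk addresses the same default-to-OpenAI pitfall named in the PR title: LlamaIndex falls back to its default (OpenAI) embed model when an index is loaded from storage without an explicit embed_model, even if the index was built with different embeddings. A minimal sketch of the corrected load path, with MockEmbedding standing in for any non-default embedding model, assuming an index was previously persisted to ./index_storage with that same model:

    from llama_index.core import StorageContext, load_index_from_storage
    from llama_index.core.embeddings import MockEmbedding

    embeddings = MockEmbedding(embed_dim=384)

    storage_context = StorageContext.from_defaults(persist_dir="./index_storage")

    # Without embed_model=, queries against the reloaded index would be
    # embedded with the default OpenAI model, mismatching the stored vectors.
    index = load_index_from_storage(storage_context, embed_model=embeddings)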