From 5be79454c304fcd65243435fa52b6bcdcac347fb Mon Sep 17 00:00:00 2001
From: Eliad Cohen
Date: Sun, 28 Jan 2024 21:42:31 -0500
Subject: [PATCH] Addresses typo and clarification in comments (#191)

Minor changes include a typo fix and an enhanced example for using OpenAI as
an agent model with Ollama via langchain

Resolves #189 #190
---
 README.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 8870d64be9..c2a83e31b4 100644
--- a/README.md
+++ b/README.md
@@ -70,11 +70,13 @@ researcher = Agent(
   tools=[search_tool]
   # You can pass an optional llm attribute specifying what mode you wanna use.
   # It can be a local model through Ollama / LM Studio or a remote
-  # model like OpenAI, Mistral, Antrophic of others (https://python.langchain.com/docs/integrations/llms/)
+  # model like OpenAI, Mistral, Antrophic or others (https://python.langchain.com/docs/integrations/llms/)
   #
   # Examples:
   # llm=ollama_llm # was defined above in the file
-  # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)
+  # llm=OpenAI(model_name="gpt-3.5", temperature=0.7)
+  # For the OpenAI model you would need to import
+  # from langchain_openai import OpenAI
 )
 writer = Agent(
   role='Tech Content Strategist',
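
For reference, a minimal sketch of the usage the updated comments describe:
constructing a LangChain OpenAI LLM and passing it to a crewAI Agent through the
optional llm attribute. This sketch is not part of the patch; the role, goal,
backstory, model name, and temperature values are illustrative assumptions, and
it assumes the crewai and langchain-openai packages are installed and
OPENAI_API_KEY is set in the environment.

    # Illustrative sketch only (not part of the patch above); field values are assumptions.
    from crewai import Agent
    from langchain_openai import OpenAI

    # Build a LangChain LLM; the model name and temperature here are placeholders.
    llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0.7)

    researcher = Agent(
        role='Senior Research Analyst',
        goal='Uncover cutting-edge developments in AI',
        backstory='You work at a leading tech think tank.',
        llm=llm  # optional; a local Ollama / LM Studio model can be passed here instead
    )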