From c60f16158de2b5d9f271ec4e9a651756ba8eeaa6 Mon Sep 17 00:00:00 2001
From: Shorthills AI <141953346+ShorthillsAI@users.noreply.github.com>
Date: Fri, 27 Oct 2023 06:42:38 +0530
Subject: [PATCH] Fixed some grammatical and exception-type issues (#12015)

Fixed some grammatical issues and exception types.
@baskaryan, @eyurtsev

---------

Co-authored-by: Sanskar Tanwar <142409040+SanskarTanwarShorthillsAI@users.noreply.github.com>
Co-authored-by: UpneetShorthillsAI <144228282+UpneetShorthillsAI@users.noreply.github.com>
Co-authored-by: HarshGuptaShorthillsAI <144897987+HarshGuptaShorthillsAI@users.noreply.github.com>
Co-authored-by: AdityaKalraShorthillsAI <143726711+AdityaKalraShorthillsAI@users.noreply.github.com>
Co-authored-by: SakshiShorthillsAI <144228183+SakshiShorthillsAI@users.noreply.github.com>
---
 .../docs/expression_language/cookbook/prompt_llm_parser.ipynb | 4 ++--
 docs/docs/guides/debugging.md                                 | 4 ++--
 docs/docs/guides/deployments/index.mdx                        | 2 +-
 .../comprehend_moderation/base_moderation.py                  | 2 +-
 libs/langchain/langchain/llms/baidu_qianfan_endpoint.py       | 2 +-
 libs/langchain/langchain/llms/databricks.py                   | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb
index 2449408e023a6..b7021734d4569 100644
--- a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb
+++ b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb
@@ -30,7 +30,7 @@
    "source": [
     "## PromptTemplate + LLM\n",
     "\n",
-    "The simplest composition is just combing a prompt and model to create a chain that takes user input, adds it to a prompt, passes it to a model, and returns the raw model input.\n",
+    "The simplest composition is just combining a prompt and model to create a chain that takes user input, adds it to a prompt, passes it to a model, and returns the raw model output.\n",
     "\n",
     "Note, you can mix and match PromptTemplate/ChatPromptTemplates and LLMs/ChatModels as you like here."
    ]
   },
   {
@@ -76,7 +76,7 @@
    "id": "7eb9ef50",
    "metadata": {},
    "source": [
-    "Often times we want to attach kwargs that'll be passed to each model call. Here's a few examples of that:"
+    "Oftentimes we want to attach kwargs that'll be passed to each model call. Here are a few examples of that:"
    ]
   },
   {
diff --git a/docs/docs/guides/debugging.md b/docs/docs/guides/debugging.md
index ac3eec7b05c85..65528a56459da 100644
--- a/docs/docs/guides/debugging.md
+++ b/docs/docs/guides/debugging.md
@@ -376,7 +376,7 @@ agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is
 
 
 
-### `set_vebose(True)`
+### `set_verbose(True)`
 
 Setting the `verbose` flag will print out inputs and outputs in a slightly more readable format and will skip logging certain raw outputs (like the token usage stats for an LLM call) so that you can focus on application logic.
 
@@ -656,6 +656,6 @@ agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is
 
 ## Other callbacks
 
-`Callbacks` are what we use to execute any functionality within a component outside the primary component logic. All of the above solutions use `Callbacks` under the hood to log intermediate steps of components. There's a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [FileCallbackHandler](/docs/modules/callbacks/how_to/filecallbackhandler). You can also implement your own callbacks to execute custom functionality.
+`Callbacks` are what we use to execute any functionality within a component outside the primary component logic. All of the above solutions use `Callbacks` under the hood to log intermediate steps of components. There are a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [FileCallbackHandler](/docs/modules/callbacks/how_to/filecallbackhandler). You can also implement your own callbacks to execute custom functionality.
 
 See here for more info on [Callbacks](/docs/modules/callbacks/), how to use them, and customize them.
diff --git a/docs/docs/guides/deployments/index.mdx b/docs/docs/guides/deployments/index.mdx
index 8299aa02439ad..92bf63641408e 100644
--- a/docs/docs/guides/deployments/index.mdx
+++ b/docs/docs/guides/deployments/index.mdx
@@ -1,6 +1,6 @@
 # Deployment
 
-In today's fast-paced technological landscape, the use of Large Language Models (LLMs) is rapidly expanding. As a result, it's crucial for developers to understand how to effectively deploy these models in production environments. LLM interfaces typically fall into two categories:
+In today's fast-paced technological landscape, the use of Large Language Models (LLMs) is rapidly expanding. As a result, it is crucial for developers to understand how to effectively deploy these models in production environments. LLM interfaces typically fall into two categories:
 
 - **Case 1: Utilizing External LLM Providers (OpenAI, Anthropic, etc.)**
   In this scenario, most of the computational burden is handled by the LLM providers, while LangChain simplifies the implementation of business logic around these services. This approach includes features such as prompt templating, chat message generation, caching, vector embedding database creation, preprocessing, etc.
diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation.py b/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation.py
index 1005c183a83b9..9b97a42a00d0f 100644
--- a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation.py
+++ b/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation.py
@@ -61,7 +61,7 @@ def _convert_prompt_to_text(self, prompt: Any) -> str:
                 input_text = message.content
         else:
             raise ValueError(
-                f"Invalid input type {type(input)}. "
+                f"Invalid input type {type(input_text)}. "
                 "Must be a PromptValue, str, or list of BaseMessages."
             )
         return input_text
diff --git a/libs/langchain/langchain/llms/baidu_qianfan_endpoint.py b/libs/langchain/langchain/llms/baidu_qianfan_endpoint.py
index 95914fcbe541f..53b79085e5627 100644
--- a/libs/langchain/langchain/llms/baidu_qianfan_endpoint.py
+++ b/libs/langchain/langchain/llms/baidu_qianfan_endpoint.py
@@ -96,7 +96,7 @@ def validate_enviroment(cls, values: Dict) -> Dict:
 
             values["client"] = qianfan.Completion(**params)
         except ImportError:
-            raise ValueError(
+            raise ImportError(
                 "qianfan package not found, please install it with "
                 "`pip install qianfan`"
             )
diff --git a/libs/langchain/langchain/llms/databricks.py b/libs/langchain/langchain/llms/databricks.py
index fb4a3674b37e7..6488244ff4d93 100644
--- a/libs/langchain/langchain/llms/databricks.py
+++ b/libs/langchain/langchain/llms/databricks.py
@@ -92,7 +92,7 @@ def get_repl_context() -> Any:
 
         return get_context()
     except ImportError:
-        raise ValueError(
+        raise ImportError(
             "Cannot access dbruntime, not running inside a Databricks notebook."
         )
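
A note on the base_moderation.py hunk: it is a bug fix rather than a grammar fix. The old f-string referenced `type(input)`, which is Python's builtin `input` function, so the error message always reported `<class 'builtin_function_or_method'>` no matter what the caller passed; `type(input_text)` reports the offending value's actual type. A minimal sketch of the difference (the `show_message` helper is invented for this demo):

```python
def show_message(input_text: object) -> None:
    # Pre-patch: `input` here resolves to the builtin function, so the
    # reported type never varies with the argument.
    print(f"before: Invalid input type {type(input)}.")
    # Post-patch: the actual argument's type is reported.
    print(f"after:  Invalid input type {type(input_text)}.")


show_message(42)
# before: Invalid input type <class 'builtin_function_or_method'>.
# after:  Invalid input type <class 'int'>.
```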
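The baidu_qianfan_endpoint.py and databricks.py hunks change the exception raised when an import fails: `ImportError` matches the actual failure and lets callers catch a missing optional dependency by its conventional type rather than a generic `ValueError`. Below is a minimal, self-contained sketch of the corrected pattern, assuming the `qianfan` SDK named in the patch; this standalone `validate_environment` function and its `params` plumbing are a simplified stand-in for the real validator:

```python
from typing import Any, Dict


def validate_environment(values: Dict[str, Any], **params: Any) -> Dict[str, Any]:
    """Attach a qianfan client to `values`, raising ImportError if the SDK is absent."""
    try:
        import qianfan  # optional third-party SDK

        values["client"] = qianfan.Completion(**params)
    except ImportError:
        # Post-patch behavior: a missing dependency surfaces as ImportError,
        # so callers can distinguish it from bad input (ValueError).
        raise ImportError(
            "qianfan package not found, please install it with "
            "`pip install qianfan`"
        )
    return values
```

With this convention, callers that want a soft failure can write `except ImportError` and fall back gracefully, which the old `ValueError` made impossible to do precisely.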