diff --git a/.github/scripts/prep_api_docs_build.py b/.github/scripts/prep_api_docs_build.py index 2c2655e2155a4..0ddd7ac053de5 100644 --- a/.github/scripts/prep_api_docs_build.py +++ b/.github/scripts/prep_api_docs_build.py @@ -11,7 +11,9 @@ def load_packages_yaml() -> Dict[str, Any]: """Load and parse the packages.yml file.""" with open("langchain/libs/packages.yml", "r") as f: - return yaml.safe_load(f) + all_packages = yaml.safe_load(f) + + return {k: v for k, v in all_packages.items() if k == "packages"} def get_target_dir(package_name: str) -> Path: @@ -23,24 +25,19 @@ def get_target_dir(package_name: str) -> Path: return base_path / "partners" / package_name_short -def clean_target_directories(packages: Dict[str, Any]) -> None: +def clean_target_directories(packages: list) -> None: """Remove old directories that will be replaced.""" - for package in packages["packages"]: - if package["repo"] != "langchain-ai/langchain": - target_dir = get_target_dir(package["name"]) - if target_dir.exists(): - print(f"Removing {target_dir}") - shutil.rmtree(target_dir) + for package in packages: + + target_dir = get_target_dir(package["name"]) + if target_dir.exists(): + print(f"Removing {target_dir}") + shutil.rmtree(target_dir) -def move_libraries(packages: Dict[str, Any]) -> None: +def move_libraries(packages: list) -> None: """Move libraries from their source locations to the target directories.""" - for package in packages["packages"]: - # Skip if it's the main langchain repo or disabled - if package["repo"] == "langchain-ai/langchain" or package.get( - "disabled", False - ): - continue + for package in packages: repo_name = package["repo"].split("/")[1] source_path = package["path"] @@ -68,7 +65,14 @@ def main(): """Main function to orchestrate the library sync process.""" try: # Load packages configuration - packages = load_packages_yaml() + package_yaml = load_packages_yaml() + packages = [ + p + for p in package_yaml["packages"] + if not p.get("disabled", False) + and 
p["repo"].startswith("langchain-ai/") + and p["repo"] != "langchain-ai/langchain" + ] # Clean target directories clean_target_directories(packages) diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index 6dcbfcda11043..bf0c1e0454f20 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -219,11 +219,7 @@ jobs: # Replace all dashes in the package name with underscores, # since that's how Python imports packages with dashes in the name. - if [ "$PKG_NAME" == "langchain-tests" ]; then - IMPORT_NAME="langchain_standard_tests" - else - IMPORT_NAME="$(echo "$PKG_NAME" | sed s/-/_/g)" - fi + IMPORT_NAME="$(echo "$PKG_NAME" | sed s/-/_/g)" poetry run python -c "import $IMPORT_NAME; print(dir($IMPORT_NAME))" diff --git a/.github/workflows/api_doc_build.yml b/.github/workflows/api_doc_build.yml index 26e2b19cb4602..959f35a94bd37 100644 --- a/.github/workflows/api_doc_build.yml +++ b/.github/workflows/api_doc_build.yml @@ -37,9 +37,9 @@ jobs: # Get unique repositories REPOS=$(echo "$REPOS_UNSORTED" | sort -u) - # Checkout each unique repository + # Checkout each unique repository that is in langchain-ai org for repo in $REPOS; do - if [ "$repo" != "langchain-ai/langchain" ]; then + if [[ "$repo" != "langchain-ai/langchain" && "$repo" == langchain-ai/* ]]; then REPO_NAME=$(echo $repo | cut -d'/' -f2) echo "Checking out $repo to $REPO_NAME" git clone --depth 1 https://github.com/$repo.git $REPO_NAME diff --git a/docs/Makefile b/docs/Makefile index fabed9f041ac5..a3c41260e3dd0 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -38,7 +38,7 @@ install-py-deps: generate-files: mkdir -p $(INTERMEDIATE_DIR) - cp -r $(SOURCE_DIR)/* $(INTERMEDIATE_DIR) + cp -rp $(SOURCE_DIR)/* $(INTERMEDIATE_DIR) $(PYTHON) scripts/tool_feat_table.py $(INTERMEDIATE_DIR) diff --git a/docs/docs/concepts/async.mdx b/docs/docs/concepts/async.mdx index 2eab35964b3b5..d81c706db243b 100644 --- a/docs/docs/concepts/async.mdx +++ 
b/docs/docs/concepts/async.mdx @@ -46,7 +46,7 @@ Most popular LangChain integrations implement asynchronous support of their APIs When an asynchronous implementation is not available, LangChain tries to provide a default implementation, even if it incurs a **slight** overhead. -By default, LangChain will delegate the execution of a unimplemented asynchronous methods to the synchronous counterparts. LangChain almost always assumes that the synchronous method should be treated as a blocking operation and should be run in a separate thread. +By default, LangChain will delegate the execution of unimplemented asynchronous methods to the synchronous counterparts. LangChain almost always assumes that the synchronous method should be treated as a blocking operation and should be run in a separate thread. This is done using [asyncio.loop.run_in_executor](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) functionality provided by the `asyncio` library. LangChain uses the default executor provided by the `asyncio` library, which lazily initializes a thread pool executor with a default number of threads that is reused in the given event loop. While this strategy incurs a slight overhead due to context switching between threads, it guarantees that every asynchronous method has a default implementation that works out of the box. ## Performance diff --git a/docs/docs/concepts/index.mdx b/docs/docs/concepts/index.mdx index c53cc21ce21f1..9b7272643b49e 100644 --- a/docs/docs/concepts/index.mdx +++ b/docs/docs/concepts/index.mdx @@ -22,6 +22,7 @@ The conceptual guide does not cover step-by-step instructions or specific implem - **[Memory](https://langchain-ai.github.io/langgraph/concepts/memory/)**: Information about a conversation that is persisted so that it can be used in future conversations. 
- **[Multimodality](/docs/concepts/multimodality)**: The ability to work with data that comes in different forms, such as text, audio, images, and video. - **[Runnable interface](/docs/concepts/runnables)**: The base abstraction that many LangChain components and the LangChain Expression Language are built on. +- **[Streaming](/docs/concepts/streaming)**: LangChain streaming APIs for surfacing results as they are generated. - **[LangChain Expression Language (LCEL)](/docs/concepts/lcel)**: A syntax for orchestrating LangChain components. Most useful for simpler applications. - **[Document loaders](/docs/concepts/document_loaders)**: Load a source as a list of documents. - **[Retrieval](/docs/concepts/retrieval)**: Information retrieval systems can retrieve structured or unstructured data from a datasource in response to a query. diff --git a/docs/docs/contributing/how_to/index.mdx b/docs/docs/contributing/how_to/index.mdx index e4eda7ddbc1d8..d9f903f4156d6 100644 --- a/docs/docs/contributing/how_to/index.mdx +++ b/docs/docs/contributing/how_to/index.mdx @@ -2,4 +2,8 @@ - [**Documentation**](documentation/index.mdx): Help improve our docs, including this one! - [**Code**](code/index.mdx): Help us write code, fix bugs, or improve our infrastructure. -- [**Integrations**](integrations/index.mdx): Help us integrate with your favorite vendors and tools. \ No newline at end of file + +## Integrations + +- [**Start Here**](integrations/index.mdx): Help us integrate with your favorite vendors and tools. +- [**Standard Tests**](integrations/standard_tests): Ensure your integration passes an expected set of tests. 
diff --git a/docs/docs/contributing/how_to/integrations/community.mdx b/docs/docs/contributing/how_to/integrations/community.mdx index 0b965b0e4c997..c7c3dadef917e 100644 --- a/docs/docs/contributing/how_to/integrations/community.mdx +++ b/docs/docs/contributing/how_to/integrations/community.mdx @@ -1,10 +1,11 @@ -## How to add a community integration (deprecated) +## How to add a community integration (not recommended) :::danger -We are no longer accepting new community integrations. Please see the -[main integration guide](./index.mdx) for more information on contributing new -integrations. +We recommend following the [main integration guide](./index.mdx) to add new integrations instead. + +If you follow this guide, there is a high likelihood we will close your PR with the above +guide linked without much discussion. ::: diff --git a/docs/docs/contributing/how_to/integrations/index.mdx b/docs/docs/contributing/how_to/integrations/index.mdx index 5f08dce43606e..159f4ac4e999b 100644 --- a/docs/docs/contributing/how_to/integrations/index.mdx +++ b/docs/docs/contributing/how_to/integrations/index.mdx @@ -65,10 +65,10 @@ that will render on this site (https://python.langchain.com/). As a prerequisite to adding your integration to our documentation, you must: -1. Confirm that your integration is in the list of components we are currently accepting. +1. Confirm that your integration is in the [list of components](#components-to-integrate) we are currently accepting. 2. Ensure that your integration is in a separate package that can be installed with `pip install `. -3. Implement the standard tests for your integration and successfully run them. -3. Write documentation for your integration in the `docs/docs/integrations` directory of the LangChain monorepo. +3. [Implement the standard tests](/docs/contributing/how_to/integrations/standard_tests) for your integration and successfully run them. +3. 
Write documentation for your integration in the `docs/docs/integrations/` directory of the LangChain monorepo. 4. Add a provider page for your integration in the `docs/docs/integrations/providers` directory of the LangChain monorepo. Once you have completed these steps, you can submit a PR to the LangChain monorepo to add your integration to the documentation. diff --git a/docs/docs/contributing/how_to/integrations/standard_tests.ipynb b/docs/docs/contributing/how_to/integrations/standard_tests.ipynb index 393bf6961c578..4607fc83c2147 100644 --- a/docs/docs/contributing/how_to/integrations/standard_tests.ipynb +++ b/docs/docs/contributing/how_to/integrations/standard_tests.ipynb @@ -6,27 +6,19 @@ "source": [ "# How to add standard tests to an integration\n", "\n", - "Implementing standard tests \n", - "\n", - "When creating either a custom class for yourself or a new tool to publish in a LangChain integration, it is important to add standard tests to ensure it works as expected. This guide will show you how to add standard tests to a tool, and the templates for implementing each different kind of integration are linked [at the bottom](#standard-test-templates-per-component).\n", + "When creating either a custom class for yourself or a new tool to publish in a LangChain integration, it is important to add standard tests to ensure it works as expected. 
This guide will show you how to add standard tests to a tool, and you can **[Skip to the test templates](#standard-test-templates-per-component)** for implementing tests for each integration.\n", "\n", "## Setup\n", "\n", "First, let's install 2 dependencies:\n", "\n", "- `langchain-core` will define the interfaces we want to import to define our custom tool.\n", - "- `langchain-tests==0.3.0` will provide the standard tests we want to use.\n", + "- `langchain-tests==0.3.2` will provide the standard tests we want to use.\n", "\n", ":::note\n", "\n", - "The `langchain-tests` package contains the module `langchain_standard_tests`. This name\n", - "mistmatch is due to this package historically being called `langchain_standard_tests` and\n", - "the name not being available on PyPi. This will either be reconciled by our \n", - "[PEP 541 request](https://github.com/pypi/support/issues/5062) (we welcome upvotes!), \n", - "or in a new release of `langchain-tests`.\n", - "\n", "Because added tests in new versions of `langchain-tests` will always break your CI/CD pipelines, we recommend pinning the \n", - "version of `langchain-tests==0.3.0` to avoid unexpected changes.\n", + "version of `langchain-tests==0.3.2` to avoid unexpected changes.\n", "\n", ":::" ] @@ -37,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install -U langchain-core langchain-tests==0.3.0 pytest pytest-socket" + "%pip install -U langchain-core langchain-tests==0.3.2 pytest pytest-socket" ] }, { @@ -91,8 +83,8 @@ "\n", "There are 2 namespaces in the `langchain-tests` package: \n", "\n", - "- unit tests (`langchain_standard_tests.unit_tests`): designed to be used to test the tool in isolation and without access to external services\n", - "- integration tests (`langchain_standard_tests.integration_tests`): designed to be used to test the tool with access to external services (in particular, the external service that the tool is designed to interact with).\n", + "- unit tests 
(`langchain_tests.unit_tests`): designed to be used to test the tool in isolation and without access to external services\n", + "- integration tests (`langchain_tests.integration_tests`): designed to be used to test the tool with access to external services (in particular, the external service that the tool is designed to interact with).\n", "\n", ":::note\n", "\n", @@ -122,10 +114,10 @@ "from typing import Type\n", "\n", "from langchain_parrot_link.tools import ParrotMultiplyTool\n", - "from langchain_standard_tests.unit_tests import ToolsUnitTests\n", + "from langchain_tests.unit_tests import ToolsUnitTests\n", "\n", "\n", - "class ParrotMultiplyToolUnitTests(ToolsUnitTests):\n", + "class TestParrotMultiplyToolUnit(ToolsUnitTests):\n", " @property\n", " def tool_constructor(self) -> Type[ParrotMultiplyTool]:\n", " return ParrotMultiplyTool\n", @@ -156,10 +148,10 @@ "from typing import Type\n", "\n", "from langchain_parrot_link.tools import ParrotMultiplyTool\n", - "from langchain_standard_tests.integration_tests import ToolsIntegrationTests\n", + "from langchain_tests.integration_tests import ToolsIntegrationTests\n", "\n", "\n", - "class ParrotMultiplyToolIntegrationTests(ToolsIntegrationTests):\n", + "class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):\n", " @property\n", " def tool_constructor(self) -> Type[ParrotMultiplyTool]:\n", " return ParrotMultiplyTool\n", @@ -217,13 +209,17 @@ "from typing import Tuple, Type\n", "\n", "from langchain_parrot_link.chat_models import ChatParrotLink\n", - "from langchain_standard_tests.unit_tests import ChatModelUnitTests\n", + "from langchain_tests.unit_tests import ChatModelUnitTests\n", "\n", "\n", - "class ChatParrotLinkUnitTests(ChatModelUnitTests):\n", + "class TestChatParrotLinkUnit(ChatModelUnitTests):\n", " @property\n", " def chat_model_class(self) -> Type[ChatParrotLink]:\n", - " return ChatParrotLink" + " return ChatParrotLink\n", + "\n", + " @property\n", + " def chat_model_params(self) -> 
dict:\n", + " return {\"model\": \"bird-brain-001\", \"temperature\": 0}" ] }, { @@ -236,10 +232,10 @@ "from typing import Type\n", "\n", "from langchain_parrot_link.chat_models import ChatParrotLink\n", - "from langchain_standard_tests.integration_tests import ChatModelIntegrationTests\n", + "from langchain_tests.integration_tests import ChatModelIntegrationTests\n", "\n", "\n", - "class TestMistralStandard(ChatModelIntegrationTests):\n", + "class TestChatParrotLinkIntegration(ChatModelIntegrationTests):\n", " @property\n", " def chat_model_class(self) -> Type[ChatParrotLink]:\n", " return ChatParrotLink\n", @@ -254,32 +250,54 @@ "metadata": {}, "source": [ "\n", - "\n", - "
\n", - "Work in progress:\n", "
\n", - " Tools/Toolkits\n", - " TODO" + " Embedding Models" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "
\n", - "
\n", - " Retrievers\n", - " TODO" + "# title=\"tests/unit_tests/test_embeddings.py\"\n", + "from typing import Tuple, Type\n", + "\n", + "from langchain_parrot_link.embeddings import ParrotLinkEmbeddings\n", + "from langchain_standard_tests.unit_tests import EmbeddingsUnitTests\n", + "\n", + "\n", + "class TestParrotLinkEmbeddingsUnit(EmbeddingsUnitTests):\n", + " @property\n", + " def embeddings_class(self) -> Type[ParrotLinkEmbeddings]:\n", + " return ParrotLinkEmbeddings\n", + "\n", + " @property\n", + " def embedding_model_params(self) -> dict:\n", + " return {\"model\": \"nest-embed-001\", \"temperature\": 0}" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "
\n", - "
\n", - " Vector Stores\n", - " TODO" + "# title=\"tests/integration_tests/test_embeddings.py\"\n", + "from typing import Type\n", + "\n", + "from langchain_parrot_link.embeddings import ParrotLinkEmbeddings\n", + "from langchain_standard_tests.integration_tests import EmbeddingsIntegrationTests\n", + "\n", + "\n", + "class TestParrotLinkEmbeddingsIntegration(EmbeddingsIntegrationTests):\n", + " @property\n", + " def embeddings_class(self) -> Type[ParrotLinkEmbeddings]:\n", + " return ParrotLinkEmbeddings\n", + "\n", + " @property\n", + " def embedding_model_params(self) -> dict:\n", + " return {\"model\": \"nest-embed-001\", \"temperature\": 0}" ] }, { @@ -288,16 +306,83 @@ "source": [ "
\n", "
\n", - " Embedding Models\n", - " TODO" + " Tools/Toolkits\n", + " Note: The standard tests for tools/toolkits are implemented in the example in the main body of this guide too." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# title=\"tests/unit_tests/test_tools.py\"\n", + "from typing import Type\n", + "\n", + "from langchain_parrot_link.tools import ParrotMultiplyTool\n", + "from langchain_standard_tests.unit_tests import ToolsUnitTests\n", + "\n", + "\n", + "class TestParrotMultiplyToolUnit(ToolsUnitTests):\n", + " @property\n", + " def tool_constructor(self) -> Type[ParrotMultiplyTool]:\n", + " return ParrotMultiplyTool\n", + "\n", + " def tool_constructor_params(self) -> dict:\n", + " # if your tool constructor instead required initialization arguments like\n", + " # `def __init__(self, some_arg: int):`, you would return those here\n", + " # as a dictionary, e.g.: `return {'some_arg': 42}`\n", + " return {}\n", + "\n", + " def tool_invoke_params_example(self) -> dict:\n", + " \"\"\"\n", + " Returns a dictionary representing the \"args\" of an example tool call.\n", + "\n", + " This should NOT be a ToolCall dict - i.e. 
it should not\n", + " have {\"name\", \"id\", \"args\"} keys.\n", + " \"\"\"\n", + " return {\"a\": 2, \"b\": 3}" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# title=\"tests/integration_tests/test_tools.py\"\n", + "from typing import Type\n", + "\n", + "from langchain_parrot_link.tools import ParrotMultiplyTool\n", + "from langchain_tests.integration_tests import ToolsIntegrationTests\n", + "\n", + "\n", + "class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):\n", + " @property\n", + " def tool_constructor(self) -> Type[ParrotMultiplyTool]:\n", + " return ParrotMultiplyTool\n", + "\n", + " def tool_constructor_params(self) -> dict:\n", + " # if your tool constructor instead required initialization arguments like\n", + " # `def __init__(self, some_arg: int):`, you would return those here\n", + " # as a dictionary, e.g.: `return {'some_arg': 42}`\n", + " return {}\n", + "\n", + " def tool_invoke_params_example(self) -> dict:\n", + " \"\"\"\n", + " Returns a dictionary representing the \"args\" of an example tool call.\n", + "\n", + " This should NOT be a ToolCall dict - i.e. it should not\n", + " have {\"name\", \"id\", \"args\"} keys.\n", + " \"\"\"\n", + " return {\"a\": 2, \"b\": 3}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "
\n", - "
" + "" ] } ], diff --git a/docs/docs/contributing/index.mdx b/docs/docs/contributing/index.mdx index 67930710585dc..d2c1231789d8d 100644 --- a/docs/docs/contributing/index.mdx +++ b/docs/docs/contributing/index.mdx @@ -17,6 +17,7 @@ More coming soon! We are working on tutorials to help you make your first contri - [**Documentation**](how_to/documentation/index.mdx): Help improve our docs, including this one! - [**Code**](how_to/code/index.mdx): Help us write code, fix bugs, or improve our infrastructure. - [**Integrations**](how_to/integrations/index.mdx): Help us integrate with your favorite vendors and tools. +- [**Standard Tests**](how_to/integrations/standard_tests): Ensure your integration passes an expected set of tests. ## Reference diff --git a/docs/docs/how_to/embed_text.mdx b/docs/docs/how_to/embed_text.mdx index 2450e99440c27..0c636fec3e7a0 100644 --- a/docs/docs/how_to/embed_text.mdx +++ b/docs/docs/how_to/embed_text.mdx @@ -15,87 +15,9 @@ The base Embeddings class in LangChain provides two methods: one for embedding d ### Setup -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; +import EmbeddingTabs from "@theme/EmbeddingTabs"; - - -To start we'll need to install the OpenAI partner package: - -```bash -pip install langchain-openai -``` - -Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running: - -```bash -export OPENAI_API_KEY="..." 
-``` - -If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class: - -```python -from langchain_openai import OpenAIEmbeddings - -embeddings_model = OpenAIEmbeddings(api_key="...") -``` - -Otherwise you can initialize without any params: -```python -from langchain_openai import OpenAIEmbeddings - -embeddings_model = OpenAIEmbeddings() -``` - - - - -To start we'll need to install the Cohere SDK package: - -```bash -pip install langchain-cohere -``` - -Accessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). Once we have a key we'll want to set it as an environment variable by running: - -```shell -export COHERE_API_KEY="..." -``` - -If you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere LLM class: - -```python -from langchain_cohere import CohereEmbeddings - -embeddings_model = CohereEmbeddings(cohere_api_key="...", model='embed-english-v3.0') -``` - -Otherwise you can initialize simply as shown below: -```python -from langchain_cohere import CohereEmbeddings - -embeddings_model = CohereEmbeddings(model='embed-english-v3.0') -``` -Do note that it is mandatory to pass the model parameter while initializing the CohereEmbeddings class. - - - - -To start we'll need to install the Hugging Face partner package: - -```bash -pip install langchain-huggingface -``` - -You can then load any [Sentence Transformers model](https://huggingface.co/models?library=sentence-transformers) from the Hugging Face Hub. 
- -```python -from langchain_huggingface import HuggingFaceEmbeddings - -embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") -``` - - + ### `embed_documents` #### Embed list of texts diff --git a/docs/docs/integrations/chat/index.mdx b/docs/docs/integrations/chat/index.mdx index e2b0be6da4dda..537cdc2c11a37 100644 --- a/docs/docs/integrations/chat/index.mdx +++ b/docs/docs/integrations/chat/index.mdx @@ -15,6 +15,14 @@ If you'd like to contribute an integration, see [Contributing integrations](/doc ::: +import ChatModelTabs from "@theme/ChatModelTabs"; + + + +```python +model.invoke("Hello, world!") +``` + ## Featured Providers :::info diff --git a/docs/docs/integrations/text_embedding/azureopenai.ipynb b/docs/docs/integrations/text_embedding/azureopenai.ipynb index 7f5281e21e9e5..916132c6f3e43 100644 --- a/docs/docs/integrations/text_embedding/azureopenai.ipynb +++ b/docs/docs/integrations/text_embedding/azureopenai.ipynb @@ -45,7 +45,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "36521c2a", "metadata": {}, "outputs": [], @@ -53,8 +53,10 @@ "import getpass\n", "import os\n", "\n", - "if not os.getenv(\"OPENAI_API_KEY\"):\n", - " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your AzureOpenAI API key: \")" + "if not os.getenv(\"AZURE_OPENAI_API_KEY\"):\n", + " os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass(\n", + " \"Enter your AzureOpenAI API key: \"\n", + " )" ] }, { diff --git a/docs/docs/integrations/text_embedding/bedrock.ipynb b/docs/docs/integrations/text_embedding/bedrock.ipynb index 3c4768507827c..b559621f29104 100644 --- a/docs/docs/integrations/text_embedding/bedrock.ipynb +++ b/docs/docs/integrations/text_embedding/bedrock.ipynb @@ -31,12 +31,12 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "282239c8-e03a-4abc-86c1-ca6120231a20", "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings 
import BedrockEmbeddings\n", + "from langchain_aws import BedrockEmbeddings\n", "\n", "embeddings = BedrockEmbeddings(\n", " credentials_profile_name=\"bedrock-admin\", region_name=\"us-east-1\"\n", diff --git a/docs/docs/integrations/text_embedding/index.mdx b/docs/docs/integrations/text_embedding/index.mdx index fe2a627192139..169480565122b 100644 --- a/docs/docs/integrations/text_embedding/index.mdx +++ b/docs/docs/integrations/text_embedding/index.mdx @@ -11,6 +11,14 @@ import { CategoryTable, IndexTable } from "@theme/FeatureTables"; This page documents integrations with various model providers that allow you to use embeddings in LangChain. +import EmbeddingTabs from "@theme/EmbeddingTabs"; + + + +```python +embeddings.embed_query("Hello, world!") +``` + ## All embedding models diff --git a/docs/docs/integrations/vectorstores/index.mdx b/docs/docs/integrations/vectorstores/index.mdx index 8b276057dad21..c213d968ee3e2 100644 --- a/docs/docs/integrations/vectorstores/index.mdx +++ b/docs/docs/integrations/vectorstores/index.mdx @@ -3,12 +3,24 @@ sidebar_position: 0 sidebar_class_name: hidden --- -# Vectorstores +# Vector stores import { CategoryTable, IndexTable } from "@theme/FeatureTables"; A [vector store](/docs/concepts/vectorstores) stores [embedded](/docs/concepts/embedding_models) data and performs similarity search. +**Select embedding model:** + +import EmbeddingTabs from "@theme/EmbeddingTabs"; + + + +**Select vector store:** + +import VectorStoreTabs from "@theme/VectorStoreTabs"; + + + ## All Vectorstores diff --git a/docs/docs/tutorials/llm_chain.ipynb b/docs/docs/tutorials/llm_chain.ipynb index fccc2c6cd9fc1..ea1a8ae0288f2 100644 --- a/docs/docs/tutorials/llm_chain.ipynb +++ b/docs/docs/tutorials/llm_chain.ipynb @@ -318,7 +318,7 @@ "source": [ "## Conclusion\n", "\n", - "That's it! In this tutorial you've learned how to create your first simple LLM application. 
You've learned how to work with language models, how to how to create a prompt template, and how to get great observability into chains you create with LangSmith.\n", + "That's it! In this tutorial you've learned how to create your first simple LLM application. You've learned how to work with language models, how to create a prompt template, and how to get great observability into chains you create with LangSmith.\n", "\n", "This just scratches the surface of what you will want to learn to become a proficient AI Engineer. Luckily - we've got a lot of other resources!\n", "\n", diff --git a/docs/scripts/notebook_convert.py b/docs/scripts/notebook_convert.py index 02b83f8032839..429734f115817 100644 --- a/docs/scripts/notebook_convert.py +++ b/docs/scripts/notebook_convert.py @@ -184,7 +184,18 @@ def _convert_notebook( source_paths_stripped = [p.strip() for p in source_path_strs] source_paths = [intermediate_docs_dir / p for p in source_paths_stripped if p] else: - source_paths = intermediate_docs_dir.glob("**/*.ipynb") + original_paths = list(intermediate_docs_dir.glob("**/*.ipynb")) + # exclude files that exist in output directory and are newer + relative_paths = [p.relative_to(intermediate_docs_dir) for p in original_paths] + out_paths = [ + output_docs_dir / p.parent / (p.stem + ".md") for p in relative_paths + ] + source_paths = [ + p + for p, o in zip(original_paths, out_paths) + if not o.exists() or o.stat().st_mtime < p.stat().st_mtime + ] + print(f"rebuilding {len(source_paths)}/{len(relative_paths)} notebooks") with multiprocessing.Pool() as pool: pool.map( diff --git a/docs/src/theme/ChatModelTabs.js b/docs/src/theme/ChatModelTabs.js index 9d27a9a44a135..162723942aab4 100644 --- a/docs/src/theme/ChatModelTabs.js +++ b/docs/src/theme/ChatModelTabs.js @@ -114,7 +114,7 @@ export default function ChatModelTabs(props) { value: "Google", label: "Google", text: `from langchain_google_vertexai import ChatVertexAI\n\n${llmVarName} = 
ChatVertexAI(${googleParamsOrDefault})`, - apiKeyName: "GOOGLE_API_KEY", + apiKeyText: "# Ensure your VertexAI credentials are configured", packageName: "langchain-google-vertexai", default: false, shouldHide: hideGoogle, diff --git a/docs/src/theme/EmbeddingTabs.js b/docs/src/theme/EmbeddingTabs.js index 7ad62a515ad89..7e1012baec109 100644 --- a/docs/src/theme/EmbeddingTabs.js +++ b/docs/src/theme/EmbeddingTabs.js @@ -7,15 +7,41 @@ export default function EmbeddingTabs(props) { const { openaiParams, hideOpenai, + azureOpenaiParams, + hideAzureOpenai, + googleParams, + hideGoogle, + awsParams, + hideAws, huggingFaceParams, hideHuggingFace, + ollamaParams, + hideOllama, + cohereParams, + hideCohere, + mistralParams, + hideMistral, + nomicParams, + hideNomic, + nvidiaParams, + hideNvidia, fakeEmbeddingParams, hideFakeEmbedding, customVarName, } = props; const openAIParamsOrDefault = openaiParams ?? `model="text-embedding-3-large"`; + const azureParamsOrDefault = + azureOpenaiParams ?? + `\n azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],\n azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],\n openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],\n`; + const googleParamsOrDefault = googleParams ?? `model="text-embedding-004"`; + const awsParamsOrDefault = awsParams ?? `model_id="amazon.titan-embed-text-v2:0"`; const huggingFaceParamsOrDefault = huggingFaceParams ?? `model="sentence-transformers/all-mpnet-base-v2"`; + const ollamaParamsOrDefault = ollamaParams ?? `model="llama3"`; + const cohereParamsOrDefault = cohereParams ?? `model="embed-english-v3.0"`; + const mistralParamsOrDefault = mistralParams ?? `model="mistral-embed"`; + const nomicsParamsOrDefault = nomicParams ?? `model="nomic-embed-text-v1.5"`; + const nvidiaParamsOrDefault = nvidiaParams ?? `model="NV-Embed-QA"`; const fakeEmbeddingParamsOrDefault = fakeEmbeddingParams ?? `size=4096`; const embeddingVarName = customVarName ?? 
"embeddings"; @@ -30,6 +56,33 @@ export default function EmbeddingTabs(props) { default: true, shouldHide: hideOpenai, }, + { + value: "Azure", + label: "Azure", + text: `from langchain_openai import AzureOpenAIEmbeddings\n\n${embeddingVarName} = AzureOpenAIEmbeddings(${azureParamsOrDefault})`, + apiKeyName: "AZURE_OPENAI_API_KEY", + packageName: "langchain-openai", + default: false, + shouldHide: hideAzureOpenai, + }, + { + value: "Google", + label: "Google", + text: `from langchain_google_vertexai import VertexAIEmbeddings\n\n${embeddingVarName} = VertexAIEmbeddings(${googleParamsOrDefault})`, + apiKeyName: undefined, + packageName: "langchain-google-vertexai", + default: false, + shouldHide: hideGoogle, + }, + { + value: "AWS", + label: "AWS", + text: `from langchain_aws import BedrockEmbeddings\n\n${embeddingVarName} = BedrockEmbeddings(${awsParamsOrDefault})`, + apiKeyName: undefined, + packageName: "langchain-aws", + default: false, + shouldHide: hideAws, + }, { value: "HuggingFace", label: "HuggingFace", @@ -40,9 +93,54 @@ export default function EmbeddingTabs(props) { shouldHide: hideHuggingFace, }, { - value: "Fake Embedding", - label: "Fake Embedding", - text: `from langchain_core.embeddings import FakeEmbeddings\n\n${embeddingVarName} = FakeEmbeddings(${fakeEmbeddingParamsOrDefault})`, + value: "Ollama", + label: "Ollama", + text: `from langchain_ollama import OllamaEmbeddings\n\n${embeddingVarName} = OllamaEmbeddings(${ollamaParamsOrDefault})`, + apiKeyName: undefined, + packageName: "langchain-ollama", + default: false, + shouldHide: hideOllama, + }, + { + value: "Cohere", + label: "Cohere", + text: `from langchain_cohere import CohereEmbeddings\n\n${embeddingVarName} = CohereEmbeddings(${cohereParamsOrDefault})`, + apiKeyName: "COHERE_API_KEY", + packageName: "langchain-cohere", + default: false, + shouldHide: hideCohere, + }, + { + value: "MistralAI", + label: "MistralAI", + text: `from langchain_mistralai import 
MistralAIEmbeddings\n\n${embeddingVarName} = MistralAIEmbeddings(${mistralParamsOrDefault})`, + apiKeyName: "MISTRALAI_API_KEY", + packageName: "langchain-mistralai", + default: false, + shouldHide: hideMistral, + }, + { + value: "Nomic", + label: "Nomic", + text: `from langchain_nomic import NomicEmbeddings\n\n${embeddingVarName} = NomicEmbeddings(${nomicsParamsOrDefault})`, + apiKeyName: "NOMIC_API_KEY", + packageName: "langchain-nomic", + default: false, + shouldHide: hideNomic, + }, + { + value: "NVIDIA", + label: "NVIDIA", + text: `from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings\n\n${embeddingVarName} = NVIDIAEmbeddings(${nvidiaParamsOrDefault})`, + apiKeyName: "NVIDIA_API_KEY", + packageName: "langchain-nvidia-ai-endpoints", + default: false, + shouldHide: hideNvidia, + }, + { + value: "Fake", + label: "Fake", + text: `from langchain_core.embeddings import DeterministicFakeEmbedding\n\n${embeddingVarName} = DeterministicFakeEmbedding(${fakeEmbeddingParamsOrDefault})`, apiKeyName: undefined, packageName: "langchain-core", default: false, @@ -55,9 +153,7 @@ export default function EmbeddingTabs(props) { {tabItems .filter((tabItem) => !tabItem.shouldHide) .map((tabItem) => { - const apiKeyText = tabItem.apiKeyName ? `import getpass - - os.environ["${tabItem.apiKeyName}"] = getpass.getpass()` : ''; + const apiKeyText = tabItem.apiKeyName ? 
`import getpass\n\nos.environ["${tabItem.apiKeyName}"] = getpass.getpass()` : ''; return ( + {tabItems.map((tabItem) => ( + + {`pip install -qU ${tabItem.packageName}`} + {tabItem.text} + + ))} + + ); +} diff --git a/libs/community/langchain_community/chat_models/lindormai.py b/libs/community/langchain_community/chat_models/lindormai.py deleted file mode 100644 index 12207f3987dca..0000000000000 --- a/libs/community/langchain_community/chat_models/lindormai.py +++ /dev/null @@ -1,366 +0,0 @@ -"""Lindorm AI chat model.""" -from __future__ import annotations - -import logging -from typing import ( - Any, - AsyncIterator, - Callable, - Dict, - Iterator, - List, - Optional, - Sequence, - Type, - Union, -) - -from langchain_core.callbacks import ( - AsyncCallbackManagerForLLMRun, - CallbackManagerForLLMRun, -) -from langchain_core.language_models import LanguageModelInput -from langchain_core.language_models.chat_models import BaseChatModel -from langchain_core.messages import ( - AIMessage, - AIMessageChunk, - BaseMessage, - BaseMessageChunk, - ChatMessage, - ChatMessageChunk, - HumanMessage, - HumanMessageChunk, - SystemMessage, - SystemMessageChunk, -) -from langchain_core.outputs import ( - ChatGeneration, - ChatGenerationChunk, - ChatResult, -) -from langchain_core.pydantic_v1 import Field, root_validator, BaseModel -from langchain_core.runnables import Runnable -from requests.exceptions import HTTPError -from tenacity import ( - before_sleep_log, - retry, - retry_if_exception_type, - stop_after_attempt, - wait_exponential, -) - -logger = logging.getLogger(__name__) - -DEFAULT_TEMPERATURE = 0.1 -DEFAULT_NUM_OUTPUTS = 256 # tokens - - -def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMessage: - if isinstance(message_chunk, HumanMessageChunk): - return HumanMessage(content=message_chunk.content) - elif isinstance(message_chunk, AIMessageChunk): - return AIMessage(content=message_chunk.content) - elif isinstance(message_chunk, 
SystemMessageChunk): - return SystemMessage(content=message_chunk.content) - elif isinstance(message_chunk, ChatMessageChunk): - return ChatMessage(role=message_chunk.role, content=message_chunk.content) - else: - raise TypeError(f"Got unknown type {message_chunk}") - - -def convert_message_to_dict(message: BaseMessage) -> dict: - """Convert a message to a dict.""" - message_dict: Dict[str, Any] - if isinstance(message, ChatMessage): - message_dict = {"role": message.role, "content": message.content} - elif isinstance(message, HumanMessage): - message_dict = {"role": "user", "content": message.content} - elif isinstance(message, AIMessage): - message_dict = {"role": "assistant", "content": message.content} - elif isinstance(message, SystemMessage): - message_dict = {"role": "system", "content": message.content} - else: - raise TypeError(f"Got unknown type {message}") - return message_dict - - -def _create_retry_decorator(llm: ChatLindormAI) -> Callable[[Any], Any]: - min_seconds = 1 - max_seconds = 4 - return retry( - reraise=True, - stop=stop_after_attempt(llm.max_retries), - wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), - retry=(retry_if_exception_type(HTTPError)), - before_sleep=before_sleep_log(logger, logging.WARNING), - ) - - -class ChatLindormAI(BaseChatModel): - """LindormAI chat models API. - - To use, you should have the ``lindormai`` python package installed, - and set variable ``endpoint``, ``username``, ``password`` and ``model_name``. - - Example: - .. code-block:: python - from langchain_community.chat_models.lindormai import ChatLindormAI - lindorm_ai_chat = ChatLindormAI( - endpoint='https://ld-xxx-proxy-ml.lindorm.rds.aliyuncs.com:9002', - username='root', - password='xxx', - model_name='qwen-72b' - ) - """ - - client: Any #: :meta private: - endpoint: str = Field(...) - username: str = Field(...) - password: str = Field(...) - model_name: str = Field(...) 
- - max_tokens: Optional[int] = Field( - description="The maximum number of tokens to generate.", - default=DEFAULT_NUM_OUTPUTS, - gt=0, - ) - temperature: Optional[float] = Field( - description="The temperature to use during generation.", - default=DEFAULT_TEMPERATURE, - gte=0.0, - lte=2.0, - ) - top_k: Optional[int] = Field( - description="Sample counter when generate.", default=None - ) - top_p: Optional[float] = Field( - description="Sample probability threshold when generate.", default=None - ) - seed: Optional[int] = Field( - description="Random seed when generate.", default=1234, gte=0 - ) - repetition_penalty: Optional[float] = Field( - description="Penalty for repeated words in generated text; 1.0 is no penalty.", - default=None, - ) - max_retries: int = 10 - streaming: bool = False - - @property - def _llm_type(self) -> str: - """Return type of llm.""" - return "lindormai" - - @root_validator() - def validate_environment(cls, values: Dict) -> Dict: - """Ensure the client is initialized properly.""" - if not values.get("client"): - try: - import lindormai - except ImportError: - raise ImportError( - "Could not import lindormai python package. " - "Please install it with `pip install lindormai-x.y.z-py3-none-any.whl`." 
- ) - - from lindormai.model_manager import ModelManager - values["client"] = ModelManager(values['endpoint'], values['username'], values['password']) - return values - - @property - def _default_params(self) -> Dict[str, Any]: - """Get the default parameters for calling Lindorm AI SDK.""" - default_params = { - "model": self.model_name, - "max_tokens": self.max_tokens, - "streaming": self.streaming, - "temperature": self.temperature, - "seed": self.seed, - } - if self.top_k is not None: - default_params["top_k"] = self.top_k - if self.top_p is not None: - default_params["top_p"] = self.top_p - return default_params - - def completion_with_retry(self, **kwargs: Any) -> Any: - """Use tenacity to retry the completion call.""" - retry_decorator = _create_retry_decorator(self) - - @retry_decorator - def _completion_with_retry(**_kwargs: Any) -> Any: - return self.client.infer(name=self.model_name, input_data=str(_kwargs["input_data"]), params=_kwargs) - - return _completion_with_retry(**kwargs) - - def stream_completion_with_retry(self, **kwargs: Any) -> Any: - """Use tenacity to retry the completion call.""" - retry_decorator = _create_retry_decorator(self) - - @retry_decorator - def _stream_completion_with_retry(**_kwargs: Any) -> Any: - responses = self.client.stream_infer(name=self.model_name, input_data=str(_kwargs["input_data"]), - params=_kwargs) - for resp in responses: - yield resp - - return _stream_completion_with_retry(**kwargs) - - def _generate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - generations = [] - if self.streaming: - generation: Optional[ChatGenerationChunk] = None - for chunk in self._stream(messages, stop=stop, run_manager=run_manager, **kwargs): - if generation is None: - generation = chunk - else: - generation += chunk - assert generation is not None - generations.append(self._chunk_to_generation(generation)) - 
else: - params: Dict[str, Any] = self._invocation_params(messages=messages, stop=stop, **kwargs) - params["force_nonstream"] = True - resp = self.completion_with_retry(**params) - generations.append( - ChatGeneration(**self._chat_generation_from_lindormai_resp(resp)) - ) - return ChatResult( - generations=generations, - llm_output={ - "model_name": self.model_name, - }, - ) - - async def _agenerate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - raise NotImplementedError("Please use `_generate`. Official does not support asynchronous requests") - - def _stream( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> Iterator[ChatGenerationChunk]: - params: Dict[str, Any] = self._invocation_params(messages=messages, stop=stop, stream=True, **kwargs) - for stream_resp in self.stream_completion_with_retry(**params): - chunk = ChatGenerationChunk( - **self._chat_generation_from_lindormai_resp(stream_resp, is_chunk=True) - ) - if run_manager: - run_manager.on_llm_new_token(chunk.text, chunk=chunk) - yield chunk - - async def _astream( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> AsyncIterator[ChatGenerationChunk]: - raise NotImplementedError("Please use `_stream`. 
Official does not support asynchronous requests") - - def _invocation_params( - self, messages: List[BaseMessage], stop: Any, **kwargs: Any - ) -> Dict[str, Any]: - params = {**self._default_params, **kwargs} - if stop is not None: - params["stop"] = stop - if params.get("stream"): - params["stream"] = True - message_dicts = [convert_message_to_dict(m) for m in messages] - # According to the docs, the last message should be a `user` message - if message_dicts[-1]["role"] != "user": - raise ValueError("Last message should be a user message.") - # And the `system` message should be the first message if present - system_message_indices = [ - i for i, m in enumerate(message_dicts) if m["role"] == "system" - ] - if len(system_message_indices) == 1 and system_message_indices[0] != 0: - raise ValueError("System message can only be the first message.") - elif len(system_message_indices) > 1: - raise ValueError("There can only be one system message at most.") - params["input_data"] = message_dicts - return params - - def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: - if llm_outputs[0] is None: - return {} - return llm_outputs[0] - - @staticmethod - def _chat_generation_from_lindormai_resp( - resp: Any, is_chunk: bool = False - ) -> Dict[str, Any]: - if resp is None: - raise ValueError("Response cannot be None") - elif 'output' in resp: - content = resp['output'] - else: - content = '' - for output in resp['outputs']: - content += output - return dict( - message=AIMessage(content=content) - ) - - @staticmethod - def _chunk_to_generation(chunk: ChatGenerationChunk) -> ChatGeneration: - return ChatGeneration( - message=convert_message_chunk_to_message(chunk.message), - generation_info=chunk.generation_info, - ) - - def bind_functions( - self, - functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]], - function_call: Optional[str] = None, - **kwargs: Any, - ) -> Runnable[LanguageModelInput, BaseMessage]: - """TODO(minshan): Bind 
functions (and other objects) to this chat model. This method will be used for function calls in the future. - - Args: - functions: A list of function definitions to bind to this chat model. - Can be a dictionary, pydantic model, or callable. Pydantic - models and callables will be automatically converted to - their schema dictionary representation. - function_call: Which function to require the model to call. - Must be the name of the single provided function or - "auto" to automatically determine which function to call - (if any). - kwargs: Any additional parameters to pass to the - :class:`~langchain.runnable.Runnable` constructor. - """ - from langchain.chains.openai_functions.base import convert_to_openai_function - - formatted_functions = [convert_to_openai_function(fn) for fn in functions] - - if function_call is not None: - if len(formatted_functions) != 1: - raise ValueError( - "When specifying `function_call`, you must provide exactly one " - "function." - ) - if formatted_functions[0]["name"] != function_call: - raise ValueError( - f"Function call {function_call} was specified, but the only " - f"provided function was {formatted_functions[0]['name']}." 
- ) - function_call_ = {"name": function_call} - kwargs = {**kwargs, "function_call": function_call_} - - return super().bind( - functions=formatted_functions, - **kwargs, - ) diff --git a/libs/community/tests/integration_tests/chat_models/test_litellm_standard.py b/libs/community/tests/integration_tests/chat_models/test_litellm_standard.py index 972617617bd4c..5e87e3ac8a0e2 100644 --- a/libs/community/tests/integration_tests/chat_models/test_litellm_standard.py +++ b/libs/community/tests/integration_tests/chat_models/test_litellm_standard.py @@ -4,7 +4,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_community.chat_models.litellm import ChatLiteLLM diff --git a/libs/community/tests/integration_tests/chat_models/test_moonshot.py b/libs/community/tests/integration_tests/chat_models/test_moonshot.py index 68d9f43b5d8f0..de4725cfa6c8e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_moonshot.py +++ b/libs/community/tests/integration_tests/chat_models/test_moonshot.py @@ -4,7 +4,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from pydantic import SecretStr from langchain_community.chat_models.moonshot import MoonshotChat diff --git a/libs/community/tests/integration_tests/storage/test_mongodb.py b/libs/community/tests/integration_tests/storage/test_mongodb.py index 850e4f73c2b47..82aa4b76057f3 100644 --- a/libs/community/tests/integration_tests/storage/test_mongodb.py +++ b/libs/community/tests/integration_tests/storage/test_mongodb.py @@ -2,7 +2,7 @@ import pytest from langchain_core.documents import Document -from langchain_standard_tests.integration_tests.base_store import 
BaseStoreSyncTests +from langchain_tests.integration_tests.base_store import BaseStoreSyncTests from langchain_community.storage.mongodb import MongoDBByteStore, MongoDBStore diff --git a/libs/community/tests/integration_tests/vectorstores/test_aperturedb.py b/libs/community/tests/integration_tests/vectorstores/test_aperturedb.py index 9e5f370d7d4cb..15d65de90b136 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_aperturedb.py +++ b/libs/community/tests/integration_tests/vectorstores/test_aperturedb.py @@ -3,7 +3,7 @@ import uuid import pytest -from langchain_standard_tests.integration_tests.vectorstores import ( +from langchain_tests.integration_tests.vectorstores import ( AsyncReadWriteTestSuite, ReadWriteTestSuite, ) diff --git a/libs/community/tests/unit_tests/chat_models/test_cloudflare_workersai.py b/libs/community/tests/unit_tests/chat_models/test_cloudflare_workersai.py index 940616048de02..0f0fabe3cd1e5 100644 --- a/libs/community/tests/unit_tests/chat_models/test_cloudflare_workersai.py +++ b/libs/community/tests/unit_tests/chat_models/test_cloudflare_workersai.py @@ -11,7 +11,7 @@ SystemMessage, ToolMessage, ) -from langchain_standard_tests.unit_tests import ChatModelUnitTests +from langchain_tests.unit_tests import ChatModelUnitTests from langchain_community.chat_models.cloudflare_workersai import ( ChatCloudflareWorkersAI, diff --git a/libs/community/tests/unit_tests/chat_models/test_litellm.py b/libs/community/tests/unit_tests/chat_models/test_litellm.py index 583c53d64bd1d..1d11fe5bd3f0a 100644 --- a/libs/community/tests/unit_tests/chat_models/test_litellm.py +++ b/libs/community/tests/unit_tests/chat_models/test_litellm.py @@ -4,7 +4,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import ChatModelUnitTests +from langchain_tests.unit_tests import ChatModelUnitTests from langchain_community.chat_models.litellm import ChatLiteLLM diff --git 
a/libs/community/tests/unit_tests/vectorstores/test_inmemory.py b/libs/community/tests/unit_tests/vectorstores/test_inmemory.py index 6facca3429b2a..b650cda14fdd1 100644 --- a/libs/community/tests/unit_tests/vectorstores/test_inmemory.py +++ b/libs/community/tests/unit_tests/vectorstores/test_inmemory.py @@ -3,7 +3,7 @@ import pytest from langchain_core.documents import Document -from langchain_standard_tests.integration_tests.vectorstores import ( +from langchain_tests.integration_tests.vectorstores import ( AsyncReadWriteTestSuite, ReadWriteTestSuite, ) diff --git a/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py b/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py index 6ddeefc62330b..58ee2514644dd 100644 --- a/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py +++ b/libs/core/tests/unit_tests/indexing/test_in_memory_indexer.py @@ -3,7 +3,7 @@ from collections.abc import AsyncGenerator, Generator import pytest -from langchain_standard_tests.integration_tests.indexer import ( +from langchain_tests.integration_tests.indexer import ( AsyncDocumentIndexTestSuite, DocumentIndexerTestSuite, ) diff --git a/libs/core/tests/unit_tests/stores/test_in_memory.py b/libs/core/tests/unit_tests/stores/test_in_memory.py index 3c5f810b1fc6a..1ae91db4a385e 100644 --- a/libs/core/tests/unit_tests/stores/test_in_memory.py +++ b/libs/core/tests/unit_tests/stores/test_in_memory.py @@ -1,5 +1,5 @@ import pytest -from langchain_standard_tests.integration_tests.base_store import ( +from langchain_tests.integration_tests.base_store import ( BaseStoreAsyncTests, BaseStoreSyncTests, ) diff --git a/libs/core/tests/unit_tests/vectorstores/test_in_memory.py b/libs/core/tests/unit_tests/vectorstores/test_in_memory.py index 1ab6186882639..34764f99c1016 100644 --- a/libs/core/tests/unit_tests/vectorstores/test_in_memory.py +++ b/libs/core/tests/unit_tests/vectorstores/test_in_memory.py @@ -2,7 +2,7 @@ from unittest.mock import AsyncMock, Mock import pytest 
-from langchain_standard_tests.integration_tests.vectorstores import ( +from langchain_tests.integration_tests.vectorstores import ( AsyncReadWriteTestSuite, ReadWriteTestSuite, ) diff --git a/libs/langchain/langchain/agents/output_parsers/react_single_input.py b/libs/langchain/langchain/agents/output_parsers/react_single_input.py index f63821588fe5d..b853cb04aaa18 100644 --- a/libs/langchain/langchain/agents/output_parsers/react_single_input.py +++ b/libs/langchain/langchain/agents/output_parsers/react_single_input.py @@ -9,7 +9,7 @@ FINAL_ANSWER_ACTION = "Final Answer:" MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = ( - "Invalid Format: Missing 'Action:' after 'Thought:" + "Invalid Format: Missing 'Action:' after 'Thought:'" ) MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = ( "Invalid Format: Missing 'Action Input:' after 'Action:'" diff --git a/libs/langchain/tests/integration_tests/chat_models/test_base.py b/libs/langchain/tests/integration_tests/chat_models/test_base.py index efed6e1d52290..baee018999c28 100644 --- a/libs/langchain/tests/integration_tests/chat_models/test_base.py +++ b/libs/langchain/tests/integration_tests/chat_models/test_base.py @@ -5,7 +5,7 @@ from langchain_core.messages import AIMessage from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableConfig -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from pydantic import BaseModel from langchain.chat_models import init_chat_model diff --git a/libs/partners/anthropic/tests/integration_tests/test_standard.py b/libs/partners/anthropic/tests/integration_tests/test_standard.py index 241588f32a34c..dc69c1dda4034 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_standard.py +++ b/libs/partners/anthropic/tests/integration_tests/test_standard.py @@ -5,7 +5,7 @@ from langchain_core.language_models import BaseChatModel from 
langchain_core.messages import AIMessage -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_anthropic import ChatAnthropic diff --git a/libs/partners/anthropic/tests/unit_tests/test_standard.py b/libs/partners/anthropic/tests/unit_tests/test_standard.py index 7976dcb2bc9cf..7879e2995bb8e 100644 --- a/libs/partners/anthropic/tests/unit_tests/test_standard.py +++ b/libs/partners/anthropic/tests/unit_tests/test_standard.py @@ -3,7 +3,7 @@ from typing import Type from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import ChatModelUnitTests +from langchain_tests.unit_tests import ChatModelUnitTests from langchain_anthropic import ChatAnthropic diff --git a/libs/partners/fireworks/tests/integration_tests/test_standard.py b/libs/partners/fireworks/tests/integration_tests/test_standard.py index cfefb2445e6a3..692dcb40cf357 100644 --- a/libs/partners/fireworks/tests/integration_tests/test_standard.py +++ b/libs/partners/fireworks/tests/integration_tests/test_standard.py @@ -4,7 +4,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ( # type: ignore[import-not-found] +from langchain_tests.integration_tests import ( # type: ignore[import-not-found] ChatModelIntegrationTests, # type: ignore[import-not-found] ) diff --git a/libs/partners/fireworks/tests/unit_tests/test_embeddings_standard.py b/libs/partners/fireworks/tests/unit_tests/test_embeddings_standard.py index ea8d16f92d0a8..44b6197a39181 100644 --- a/libs/partners/fireworks/tests/unit_tests/test_embeddings_standard.py +++ b/libs/partners/fireworks/tests/unit_tests/test_embeddings_standard.py @@ -3,7 +3,7 @@ from typing import Tuple, Type from langchain_core.embeddings import Embeddings -from langchain_standard_tests.unit_tests.embeddings import EmbeddingsUnitTests +from 
langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests from langchain_fireworks import FireworksEmbeddings diff --git a/libs/partners/fireworks/tests/unit_tests/test_standard.py b/libs/partners/fireworks/tests/unit_tests/test_standard.py index 61d0d152ba831..4f444897154a8 100644 --- a/libs/partners/fireworks/tests/unit_tests/test_standard.py +++ b/libs/partners/fireworks/tests/unit_tests/test_standard.py @@ -3,7 +3,7 @@ from typing import Tuple, Type from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import ( # type: ignore[import-not-found] +from langchain_tests.unit_tests import ( # type: ignore[import-not-found] ChatModelUnitTests, # type: ignore[import-not-found] ) diff --git a/libs/partners/groq/tests/integration_tests/test_chat_models.py b/libs/partners/groq/tests/integration_tests/test_chat_models.py index 1b2cf05b54239..a672de34bc53e 100644 --- a/libs/partners/groq/tests/integration_tests/test_chat_models.py +++ b/libs/partners/groq/tests/integration_tests/test_chat_models.py @@ -395,7 +395,7 @@ class Joke(BaseModel): def test_tool_calling_no_arguments() -> None: - # Note: this is a variant of a test in langchain_standard_tests + # Note: this is a variant of a test in langchain_tests # that as of 2024-08-19 fails with "Failed to call a function. Please # adjust your prompt." when `tool_choice="any"` is specified, but # passes when `tool_choice` is not specified. 
diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py index d34257b4c529e..3870ae953f6ee 100644 --- a/libs/partners/groq/tests/integration_tests/test_standard.py +++ b/libs/partners/groq/tests/integration_tests/test_standard.py @@ -5,7 +5,7 @@ import pytest from langchain_core.language_models import BaseChatModel from langchain_core.rate_limiters import InMemoryRateLimiter -from langchain_standard_tests.integration_tests import ( +from langchain_tests.integration_tests import ( ChatModelIntegrationTests, ) diff --git a/libs/partners/groq/tests/unit_tests/test_standard.py b/libs/partners/groq/tests/unit_tests/test_standard.py index 939d817499e25..e4df2916f3052 100644 --- a/libs/partners/groq/tests/unit_tests/test_standard.py +++ b/libs/partners/groq/tests/unit_tests/test_standard.py @@ -3,7 +3,7 @@ from typing import Type from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests.chat_models import ( +from langchain_tests.unit_tests.chat_models import ( ChatModelUnitTests, ) diff --git a/libs/partners/huggingface/tests/integration_tests/test_embeddings_standard.py b/libs/partners/huggingface/tests/integration_tests/test_embeddings_standard.py index 4f5cefcbdff2c..a878bea8cfa8a 100644 --- a/libs/partners/huggingface/tests/integration_tests/test_embeddings_standard.py +++ b/libs/partners/huggingface/tests/integration_tests/test_embeddings_standard.py @@ -2,7 +2,7 @@ from typing import Type -from langchain_standard_tests.integration_tests import EmbeddingsIntegrationTests +from langchain_tests.integration_tests import EmbeddingsIntegrationTests from langchain_huggingface.embeddings import ( HuggingFaceEmbeddings, diff --git a/libs/partners/huggingface/tests/integration_tests/test_standard.py b/libs/partners/huggingface/tests/integration_tests/test_standard.py index 34392e979f437..682a4c625ee9f 100644 --- 
a/libs/partners/huggingface/tests/integration_tests/test_standard.py +++ b/libs/partners/huggingface/tests/integration_tests/test_standard.py @@ -4,7 +4,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint diff --git a/libs/partners/mistralai/tests/integration_tests/test_standard.py b/libs/partners/mistralai/tests/integration_tests/test_standard.py index cea6399ee4cd8..822f2284abc1c 100644 --- a/libs/partners/mistralai/tests/integration_tests/test_standard.py +++ b/libs/partners/mistralai/tests/integration_tests/test_standard.py @@ -3,7 +3,7 @@ from typing import Optional, Type from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ( # type: ignore[import-not-found] +from langchain_tests.integration_tests import ( # type: ignore[import-not-found] ChatModelIntegrationTests, # type: ignore[import-not-found] ) diff --git a/libs/partners/mistralai/tests/unit_tests/test_standard.py b/libs/partners/mistralai/tests/unit_tests/test_standard.py index 408a6e5a6b9f8..fde82de2d5071 100644 --- a/libs/partners/mistralai/tests/unit_tests/test_standard.py +++ b/libs/partners/mistralai/tests/unit_tests/test_standard.py @@ -3,7 +3,7 @@ from typing import Type from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import ( # type: ignore[import-not-found] +from langchain_tests.unit_tests import ( # type: ignore[import-not-found] ChatModelUnitTests, # type: ignore[import-not-found] ) diff --git a/libs/partners/ollama/tests/integration_tests/test_chat_models.py b/libs/partners/ollama/tests/integration_tests/test_chat_models.py index 10ffcb39d2777..9133106cae7b9 100644 --- a/libs/partners/ollama/tests/integration_tests/test_chat_models.py +++ 
b/libs/partners/ollama/tests/integration_tests/test_chat_models.py @@ -4,7 +4,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_ollama.chat_models import ChatOllama diff --git a/libs/partners/ollama/tests/integration_tests/test_embeddings.py b/libs/partners/ollama/tests/integration_tests/test_embeddings.py index 26722362e6b66..0953510c901aa 100644 --- a/libs/partners/ollama/tests/integration_tests/test_embeddings.py +++ b/libs/partners/ollama/tests/integration_tests/test_embeddings.py @@ -2,7 +2,7 @@ from typing import Type -from langchain_standard_tests.integration_tests import EmbeddingsIntegrationTests +from langchain_tests.integration_tests import EmbeddingsIntegrationTests from langchain_ollama.embeddings import OllamaEmbeddings diff --git a/libs/partners/ollama/tests/unit_tests/test_chat_models.py b/libs/partners/ollama/tests/unit_tests/test_chat_models.py index d086914310489..850e3423e0bf9 100644 --- a/libs/partners/ollama/tests/unit_tests/test_chat_models.py +++ b/libs/partners/ollama/tests/unit_tests/test_chat_models.py @@ -2,7 +2,7 @@ from typing import Dict, Type -from langchain_standard_tests.unit_tests import ChatModelUnitTests +from langchain_tests.unit_tests import ChatModelUnitTests from langchain_ollama.chat_models import ChatOllama diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 7d69b27fb1bc8..94c96202b07f6 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -993,9 +993,6 @@ def _should_stream( # TODO: Add support for streaming with Pydantic response_format. 
warnings.warn("Streaming with Pydantic response_format not yet supported.") return False - if self.model_name is not None and self.model_name.startswith("o1"): - # TODO: Add support for streaming with o1 once supported. - return False return super()._should_stream( async_api=async_api, run_manager=run_manager, **kwargs diff --git a/libs/partners/openai/poetry.lock b/libs/partners/openai/poetry.lock index 17cd5f497af25..118f8dc31d35c 100644 --- a/libs/partners/openai/poetry.lock +++ b/libs/partners/openai/poetry.lock @@ -188,73 +188,73 @@ files = [ [[package]] name = "coverage" -version = "7.6.4" +version = "7.6.7" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" files = [ - {file = "coverage-7.6.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5f8ae553cba74085db385d489c7a792ad66f7f9ba2ee85bfa508aeb84cf0ba07"}, - {file = "coverage-7.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8165b796df0bd42e10527a3f493c592ba494f16ef3c8b531288e3d0d72c1f6f0"}, - {file = "coverage-7.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c8b95bf47db6d19096a5e052ffca0a05f335bc63cef281a6e8fe864d450a72"}, - {file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ed9281d1b52628e81393f5eaee24a45cbd64965f41857559c2b7ff19385df51"}, - {file = "coverage-7.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0809082ee480bb8f7416507538243c8863ac74fd8a5d2485c46f0f7499f2b491"}, - {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d541423cdd416b78626b55f123412fcf979d22a2c39fce251b350de38c15c15b"}, - {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58809e238a8a12a625c70450b48e8767cff9eb67c62e6154a642b21ddf79baea"}, - {file = "coverage-7.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:c9b8e184898ed014884ca84c70562b4a82cbc63b044d366fedc68bc2b2f3394a"}, - {file = "coverage-7.6.4-cp310-cp310-win32.whl", hash = "sha256:6bd818b7ea14bc6e1f06e241e8234508b21edf1b242d49831831a9450e2f35fa"}, - {file = "coverage-7.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:06babbb8f4e74b063dbaeb74ad68dfce9186c595a15f11f5d5683f748fa1d172"}, - {file = "coverage-7.6.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:73d2b73584446e66ee633eaad1a56aad577c077f46c35ca3283cd687b7715b0b"}, - {file = "coverage-7.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:51b44306032045b383a7a8a2c13878de375117946d68dcb54308111f39775a25"}, - {file = "coverage-7.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3fb02fe73bed561fa12d279a417b432e5b50fe03e8d663d61b3d5990f29546"}, - {file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed8fe9189d2beb6edc14d3ad19800626e1d9f2d975e436f84e19efb7fa19469b"}, - {file = "coverage-7.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b369ead6527d025a0fe7bd3864e46dbee3aa8f652d48df6174f8d0bac9e26e0e"}, - {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ade3ca1e5f0ff46b678b66201f7ff477e8fa11fb537f3b55c3f0568fbfe6e718"}, - {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:27fb4a050aaf18772db513091c9c13f6cb94ed40eacdef8dad8411d92d9992db"}, - {file = "coverage-7.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f704f0998911abf728a7783799444fcbbe8261c4a6c166f667937ae6a8aa522"}, - {file = "coverage-7.6.4-cp311-cp311-win32.whl", hash = "sha256:29155cd511ee058e260db648b6182c419422a0d2e9a4fa44501898cf918866cf"}, - {file = "coverage-7.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:8902dd6a30173d4ef09954bfcb24b5d7b5190cf14a43170e386979651e09ba19"}, - {file = "coverage-7.6.4-cp312-cp312-macosx_10_13_x86_64.whl", 
hash = "sha256:12394842a3a8affa3ba62b0d4ab7e9e210c5e366fbac3e8b2a68636fb19892c2"}, - {file = "coverage-7.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2b6b4c83d8e8ea79f27ab80778c19bc037759aea298da4b56621f4474ffeb117"}, - {file = "coverage-7.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d5b8007f81b88696d06f7df0cb9af0d3b835fe0c8dbf489bad70b45f0e45613"}, - {file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b57b768feb866f44eeed9f46975f3d6406380275c5ddfe22f531a2bf187eda27"}, - {file = "coverage-7.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5915fcdec0e54ee229926868e9b08586376cae1f5faa9bbaf8faf3561b393d52"}, - {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b58c672d14f16ed92a48db984612f5ce3836ae7d72cdd161001cc54512571f2"}, - {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2fdef0d83a2d08d69b1f2210a93c416d54e14d9eb398f6ab2f0a209433db19e1"}, - {file = "coverage-7.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cf717ee42012be8c0cb205dbbf18ffa9003c4cbf4ad078db47b95e10748eec5"}, - {file = "coverage-7.6.4-cp312-cp312-win32.whl", hash = "sha256:7bb92c539a624cf86296dd0c68cd5cc286c9eef2d0c3b8b192b604ce9de20a17"}, - {file = "coverage-7.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:1032e178b76a4e2b5b32e19d0fd0abbce4b58e77a1ca695820d10e491fa32b08"}, - {file = "coverage-7.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:023bf8ee3ec6d35af9c1c6ccc1d18fa69afa1cb29eaac57cb064dbb262a517f9"}, - {file = "coverage-7.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0ac3d42cb51c4b12df9c5f0dd2f13a4f24f01943627120ec4d293c9181219ba"}, - {file = "coverage-7.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8fe4984b431f8621ca53d9380901f62bfb54ff759a1348cd140490ada7b693c"}, - {file = 
"coverage-7.6.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5fbd612f8a091954a0c8dd4c0b571b973487277d26476f8480bfa4b2a65b5d06"}, - {file = "coverage-7.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dacbc52de979f2823a819571f2e3a350a7e36b8cb7484cdb1e289bceaf35305f"}, - {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:dab4d16dfef34b185032580e2f2f89253d302facba093d5fa9dbe04f569c4f4b"}, - {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:862264b12ebb65ad8d863d51f17758b1684560b66ab02770d4f0baf2ff75da21"}, - {file = "coverage-7.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5beb1ee382ad32afe424097de57134175fea3faf847b9af002cc7895be4e2a5a"}, - {file = "coverage-7.6.4-cp313-cp313-win32.whl", hash = "sha256:bf20494da9653f6410213424f5f8ad0ed885e01f7e8e59811f572bdb20b8972e"}, - {file = "coverage-7.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:182e6cd5c040cec0a1c8d415a87b67ed01193ed9ad458ee427741c7d8513d963"}, - {file = "coverage-7.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a181e99301a0ae128493a24cfe5cfb5b488c4e0bf2f8702091473d033494d04f"}, - {file = "coverage-7.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:df57bdbeffe694e7842092c5e2e0bc80fff7f43379d465f932ef36f027179806"}, - {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bcd1069e710600e8e4cf27f65c90c7843fa8edfb4520fb0ccb88894cad08b11"}, - {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99b41d18e6b2a48ba949418db48159d7a2e81c5cc290fc934b7d2380515bd0e3"}, - {file = "coverage-7.6.4-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1e54712ba3474f34b7ef7a41e65bd9037ad47916ccb1cc78769bae324c01a"}, - {file 
= "coverage-7.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:53d202fd109416ce011578f321460795abfe10bb901b883cafd9b3ef851bacfc"}, - {file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:c48167910a8f644671de9f2083a23630fbf7a1cb70ce939440cd3328e0919f70"}, - {file = "coverage-7.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cc8ff50b50ce532de2fa7a7daae9dd12f0a699bfcd47f20945364e5c31799fef"}, - {file = "coverage-7.6.4-cp313-cp313t-win32.whl", hash = "sha256:b8d3a03d9bfcaf5b0141d07a88456bb6a4c3ce55c080712fec8418ef3610230e"}, - {file = "coverage-7.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:f3ddf056d3ebcf6ce47bdaf56142af51bb7fad09e4af310241e9db7a3a8022e1"}, - {file = "coverage-7.6.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9cb7fa111d21a6b55cbf633039f7bc2749e74932e3aa7cb7333f675a58a58bf3"}, - {file = "coverage-7.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11a223a14e91a4693d2d0755c7a043db43d96a7450b4f356d506c2562c48642c"}, - {file = "coverage-7.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a413a096c4cbac202433c850ee43fa326d2e871b24554da8327b01632673a076"}, - {file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00a1d69c112ff5149cabe60d2e2ee948752c975d95f1e1096742e6077affd376"}, - {file = "coverage-7.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f76846299ba5c54d12c91d776d9605ae33f8ae2b9d1d3c3703cf2db1a67f2c0"}, - {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fe439416eb6380de434886b00c859304338f8b19f6f54811984f3420a2e03858"}, - {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0294ca37f1ba500667b1aef631e48d875ced93ad5e06fa665a3295bdd1d95111"}, - {file = "coverage-7.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:6f01ba56b1c0e9d149f9ac85a2f999724895229eb36bd997b61e62999e9b0901"}, - {file = "coverage-7.6.4-cp39-cp39-win32.whl", hash = "sha256:bc66f0bf1d7730a17430a50163bb264ba9ded56739112368ba985ddaa9c3bd09"}, - {file = "coverage-7.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:c481b47f6b5845064c65a7bc78bc0860e635a9b055af0df46fdf1c58cebf8e8f"}, - {file = "coverage-7.6.4-pp39.pp310-none-any.whl", hash = "sha256:3c65d37f3a9ebb703e710befdc489a38683a5b152242664b973a7b7b22348a4e"}, - {file = "coverage-7.6.4.tar.gz", hash = "sha256:29fc0f17b1d3fea332f8001d4558f8214af7f1d87a345f3a133c901d60347c73"}, + {file = "coverage-7.6.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:108bb458827765d538abcbf8288599fee07d2743357bdd9b9dad456c287e121e"}, + {file = "coverage-7.6.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c973b2fe4dc445cb865ab369df7521df9c27bf40715c837a113edaa2aa9faf45"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c6b24007c4bcd0b19fac25763a7cac5035c735ae017e9a349b927cfc88f31c1"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acbb8af78f8f91b3b51f58f288c0994ba63c646bc1a8a22ad072e4e7e0a49f1c"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad32a981bcdedb8d2ace03b05e4fd8dace8901eec64a532b00b15217d3677dd2"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:34d23e28ccb26236718a3a78ba72744212aa383141961dd6825f6595005c8b06"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e25bacb53a8c7325e34d45dddd2f2fbae0dbc230d0e2642e264a64e17322a777"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af05bbba896c4472a29408455fe31b3797b4d8648ed0a2ccac03e074a77e2314"}, + {file = "coverage-7.6.7-cp310-cp310-win32.whl", hash = 
"sha256:796c9b107d11d2d69e1849b2dfe41730134b526a49d3acb98ca02f4985eeff7a"}, + {file = "coverage-7.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:987a8e3da7da4eed10a20491cf790589a8e5e07656b6dc22d3814c4d88faf163"}, + {file = "coverage-7.6.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7e61b0e77ff4dddebb35a0e8bb5a68bf0f8b872407d8d9f0c726b65dfabe2469"}, + {file = "coverage-7.6.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1a5407a75ca4abc20d6252efeb238377a71ce7bda849c26c7a9bece8680a5d99"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df002e59f2d29e889c37abd0b9ee0d0e6e38c24f5f55d71ff0e09e3412a340ec"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:673184b3156cba06154825f25af33baa2671ddae6343f23175764e65a8c4c30b"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e69ad502f1a2243f739f5bd60565d14a278be58be4c137d90799f2c263e7049a"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:60dcf7605c50ea72a14490d0756daffef77a5be15ed1b9fea468b1c7bda1bc3b"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9c2eb378bebb2c8f65befcb5147877fc1c9fbc640fc0aad3add759b5df79d55d"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c0317288f032221d35fa4cbc35d9f4923ff0dfd176c79c9b356e8ef8ef2dff4"}, + {file = "coverage-7.6.7-cp311-cp311-win32.whl", hash = "sha256:951aade8297358f3618a6e0660dc74f6b52233c42089d28525749fc8267dccd2"}, + {file = "coverage-7.6.7-cp311-cp311-win_amd64.whl", hash = "sha256:5e444b8e88339a2a67ce07d41faabb1d60d1004820cee5a2c2b54e2d8e429a0f"}, + {file = "coverage-7.6.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f07ff574986bc3edb80e2c36391678a271d555f91fd1d332a1e0f4b5ea4b6ea9"}, + {file = 
"coverage-7.6.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:49ed5ee4109258973630c1f9d099c7e72c5c36605029f3a91fe9982c6076c82b"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3e8796434a8106b3ac025fd15417315d7a58ee3e600ad4dbcfddc3f4b14342c"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b925300484a3294d1c70f6b2b810d6526f2929de954e5b6be2bf8caa1f12c1"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c42ec2c522e3ddd683dec5cdce8e62817afb648caedad9da725001fa530d354"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0266b62cbea568bd5e93a4da364d05de422110cbed5056d69339bd5af5685433"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e5f2a0f161d126ccc7038f1f3029184dbdf8f018230af17ef6fd6a707a5b881f"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c132b5a22821f9b143f87446805e13580b67c670a548b96da945a8f6b4f2efbb"}, + {file = "coverage-7.6.7-cp312-cp312-win32.whl", hash = "sha256:7c07de0d2a110f02af30883cd7dddbe704887617d5c27cf373362667445a4c76"}, + {file = "coverage-7.6.7-cp312-cp312-win_amd64.whl", hash = "sha256:fd49c01e5057a451c30c9b892948976f5d38f2cbd04dc556a82743ba8e27ed8c"}, + {file = "coverage-7.6.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:46f21663e358beae6b368429ffadf14ed0a329996248a847a4322fb2e35d64d3"}, + {file = "coverage-7.6.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:40cca284c7c310d622a1677f105e8507441d1bb7c226f41978ba7c86979609ab"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77256ad2345c29fe59ae861aa11cfc74579c88d4e8dbf121cbe46b8e32aec808"}, + {file = 
"coverage-7.6.7-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87ea64b9fa52bf395272e54020537990a28078478167ade6c61da7ac04dc14bc"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d608a7808793e3615e54e9267519351c3ae204a6d85764d8337bd95993581a8"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdd94501d65adc5c24f8a1a0eda110452ba62b3f4aeaba01e021c1ed9cb8f34a"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:82c809a62e953867cf57e0548c2b8464207f5f3a6ff0e1e961683e79b89f2c55"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bb684694e99d0b791a43e9fc0fa58efc15ec357ac48d25b619f207c41f2fd384"}, + {file = "coverage-7.6.7-cp313-cp313-win32.whl", hash = "sha256:963e4a08cbb0af6623e61492c0ec4c0ec5c5cf74db5f6564f98248d27ee57d30"}, + {file = "coverage-7.6.7-cp313-cp313-win_amd64.whl", hash = "sha256:14045b8bfd5909196a90da145a37f9d335a5d988a83db34e80f41e965fb7cb42"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f2c7a045eef561e9544359a0bf5784b44e55cefc7261a20e730baa9220c83413"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dd4e4a49d9c72a38d18d641135d2fb0bdf7b726ca60a103836b3d00a1182acd"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c95e0fa3d1547cb6f021ab72f5c23402da2358beec0a8e6d19a368bd7b0fb37"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f63e21ed474edd23f7501f89b53280014436e383a14b9bd77a648366c81dce7b"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead9b9605c54d15be228687552916c89c9683c215370c4a44f1f217d2adcc34d"}, + {file 
= "coverage-7.6.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0573f5cbf39114270842d01872952d301027d2d6e2d84013f30966313cadb529"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:e2c8e3384c12dfa19fa9a52f23eb091a8fad93b5b81a41b14c17c78e23dd1d8b"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:70a56a2ec1869e6e9fa69ef6b76b1a8a7ef709972b9cc473f9ce9d26b5997ce3"}, + {file = "coverage-7.6.7-cp313-cp313t-win32.whl", hash = "sha256:dbba8210f5067398b2c4d96b4e64d8fb943644d5eb70be0d989067c8ca40c0f8"}, + {file = "coverage-7.6.7-cp313-cp313t-win_amd64.whl", hash = "sha256:dfd14bcae0c94004baba5184d1c935ae0d1231b8409eb6c103a5fd75e8ecdc56"}, + {file = "coverage-7.6.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37a15573f988b67f7348916077c6d8ad43adb75e478d0910957394df397d2874"}, + {file = "coverage-7.6.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b6cce5c76985f81da3769c52203ee94722cd5d5889731cd70d31fee939b74bf0"}, + {file = "coverage-7.6.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ab9763d291a17b527ac6fd11d1a9a9c358280adb320e9c2672a97af346ac2c"}, + {file = "coverage-7.6.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cf96ceaa275f071f1bea3067f8fd43bec184a25a962c754024c973af871e1b7"}, + {file = "coverage-7.6.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aee9cf6b0134d6f932d219ce253ef0e624f4fa588ee64830fcba193269e4daa3"}, + {file = "coverage-7.6.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2bc3e45c16564cc72de09e37413262b9f99167803e5e48c6156bccdfb22c8327"}, + {file = "coverage-7.6.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:623e6965dcf4e28a3debaa6fcf4b99ee06d27218f46d43befe4db1c70841551c"}, + {file = "coverage-7.6.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:850cfd2d6fc26f8346f422920ac204e1d28814e32e3a58c19c91980fa74d8289"}, + {file = "coverage-7.6.7-cp39-cp39-win32.whl", hash = "sha256:c296263093f099da4f51b3dff1eff5d4959b527d4f2f419e16508c5da9e15e8c"}, + {file = "coverage-7.6.7-cp39-cp39-win_amd64.whl", hash = "sha256:90746521206c88bdb305a4bf3342b1b7316ab80f804d40c536fc7d329301ee13"}, + {file = "coverage-7.6.7-pp39.pp310-none-any.whl", hash = "sha256:0ddcb70b3a3a57581b450571b31cb774f23eb9519c2aaa6176d3a84c9fc57671"}, + {file = "coverage-7.6.7.tar.gz", hash = "sha256:d79d4826e41441c9a118ff045e4bccb9fdbdcb1d02413e7ea6eb5c87b5439d24"}, ] [package.dependencies] @@ -315,13 +315,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.6" +version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, - {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] @@ -537,13 +537,13 @@ url = "../../standard-tests" [[package]] name = "langsmith" -version = "0.1.142" +version = "0.1.143" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.142-py3-none-any.whl", hash = "sha256:f639ca23c9a0bb77af5fb881679b2f66ff1f21f19d0bebf4e51375e7585a8b38"}, - {file = "langsmith-0.1.142.tar.gz", hash = "sha256:f8a84d100f3052233ff0a1d66ae14c5dfc20b7e41a1601de011384f16ee6cb82"}, + {file = "langsmith-0.1.143-py3-none-any.whl", hash = "sha256:ba0d827269e9b03a90fababe41fa3e4e3f833300b95add10184f7e67167dde6f"}, + {file = "langsmith-0.1.143.tar.gz", hash = "sha256:4c5159e5cd84b3f8499433009e72d2076dd2daf6c044ac8a3611b30d0d0161c5"}, ] [package.dependencies] @@ -667,13 +667,13 @@ files = [ [[package]] name = "openai" -version = "1.54.3" +version = "1.54.4" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = "openai-1.54.3-py3-none-any.whl", hash = "sha256:f18dbaf09c50d70c4185b892a2a553f80681d1d866323a2da7f7be2f688615d5"}, - {file = "openai-1.54.3.tar.gz", hash = "sha256:7511b74eeb894ac0b0253dc71f087a15d2e4d71d22d0088767205143d880cca6"}, + {file = "openai-1.54.4-py3-none-any.whl", hash = "sha256:0d95cef99346bf9b6d7fbf57faf61a673924c3e34fa8af84c9ffe04660673a7e"}, + {file = "openai-1.54.4.tar.gz", hash = "sha256:50f3656e45401c54e973fa05dc29f3f0b0d19348d685b2f7ddb4d92bf7b1b6bf"}, ] [package.dependencies] diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index 492cbc63eac4b..744812533a7eb 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "langchain-openai" -version = "0.2.8" +version = "0.2.9" description = "An integration package connecting OpenAI and LangChain" authors = [] readme = "README.md" diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py b/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py index c99bfb1126e22..fff0599963c2f 100644 --- 
a/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_azure_standard.py @@ -5,7 +5,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai import AzureChatOpenAI diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py index 858ab1e4b4321..cbee0d7041850 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py @@ -22,10 +22,8 @@ ) from langchain_core.outputs import ChatGeneration, ChatResult, LLMResult from langchain_core.prompts import ChatPromptTemplate -from langchain_standard_tests.integration_tests.chat_models import ( - _validate_tool_call_message, -) -from langchain_standard_tests.integration_tests.chat_models import ( +from langchain_tests.integration_tests.chat_models import _validate_tool_call_message +from langchain_tests.integration_tests.chat_models import ( magic_function as invalid_magic_function, ) from pydantic import BaseModel, Field @@ -1022,7 +1020,8 @@ def test_audio_input_modality() -> None: def test_prediction_tokens() -> None: - code = dedent(""" + code = dedent( + """ /// /// Represents a user with a first name, last name, and username. 
/// @@ -1043,7 +1042,8 @@ def test_prediction_tokens() -> None: /// public string Username { get; set; } } - """) + """ + ) llm = ChatOpenAI(model="gpt-4o") query = ( diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py index b021603aace41..890e70268085f 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py @@ -5,7 +5,7 @@ from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_openai import ChatOpenAI diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_azure_standard.py b/libs/partners/openai/tests/unit_tests/chat_models/test_azure_standard.py index 465c1dc0c22a5..3d1faa97db485 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_azure_standard.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_azure_standard.py @@ -4,7 +4,7 @@ import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import ChatModelUnitTests +from langchain_tests.unit_tests import ChatModelUnitTests from langchain_openai import AzureChatOpenAI diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base_standard.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base_standard.py index 8049da874cbf3..d24ddd1e71f7e 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_base_standard.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base_standard.py @@ -3,7 +3,7 @@ from typing import Tuple, Type from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import 
ChatModelUnitTests +from langchain_tests.unit_tests import ChatModelUnitTests from langchain_openai import ChatOpenAI diff --git a/libs/partners/openai/tests/unit_tests/embeddings/test_azure_standard.py b/libs/partners/openai/tests/unit_tests/embeddings/test_azure_standard.py index b5f1591c476ca..a972fe5e38581 100644 --- a/libs/partners/openai/tests/unit_tests/embeddings/test_azure_standard.py +++ b/libs/partners/openai/tests/unit_tests/embeddings/test_azure_standard.py @@ -1,7 +1,7 @@ from typing import Tuple, Type from langchain_core.embeddings import Embeddings -from langchain_standard_tests.unit_tests.embeddings import EmbeddingsUnitTests +from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests from langchain_openai import AzureOpenAIEmbeddings diff --git a/libs/partners/openai/tests/unit_tests/embeddings/test_base_standard.py b/libs/partners/openai/tests/unit_tests/embeddings/test_base_standard.py index b265e5600eb35..b1b2f2d368fa9 100644 --- a/libs/partners/openai/tests/unit_tests/embeddings/test_base_standard.py +++ b/libs/partners/openai/tests/unit_tests/embeddings/test_base_standard.py @@ -3,7 +3,7 @@ from typing import Tuple, Type from langchain_core.embeddings import Embeddings -from langchain_standard_tests.unit_tests.embeddings import EmbeddingsUnitTests +from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests from langchain_openai import OpenAIEmbeddings diff --git a/libs/partners/xai/tests/integration_tests/test_chat_models_standard.py b/libs/partners/xai/tests/integration_tests/test_chat_models_standard.py index c686f7e19486c..1152fe44946f7 100644 --- a/libs/partners/xai/tests/integration_tests/test_chat_models_standard.py +++ b/libs/partners/xai/tests/integration_tests/test_chat_models_standard.py @@ -5,7 +5,7 @@ import pytest # type: ignore[import-not-found] from langchain_core.language_models import BaseChatModel from langchain_core.rate_limiters import InMemoryRateLimiter -from 
langchain_standard_tests.integration_tests import ( # type: ignore[import-not-found] +from langchain_tests.integration_tests import ( # type: ignore[import-not-found] ChatModelIntegrationTests, # type: ignore[import-not-found] ) diff --git a/libs/partners/xai/tests/unit_tests/test_chat_models_standard.py b/libs/partners/xai/tests/unit_tests/test_chat_models_standard.py index a0d6a5170a8d6..a70718f3224a2 100644 --- a/libs/partners/xai/tests/unit_tests/test_chat_models_standard.py +++ b/libs/partners/xai/tests/unit_tests/test_chat_models_standard.py @@ -3,7 +3,7 @@ from typing import Tuple, Type from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import ( # type: ignore[import-not-found] +from langchain_tests.unit_tests import ( # type: ignore[import-not-found] ChatModelUnitTests, # type: ignore[import-not-found] ) diff --git a/libs/standard-tests/Makefile b/libs/standard-tests/Makefile index a48f80928b993..5283db9a9dc43 100644 --- a/libs/standard-tests/Makefile +++ b/libs/standard-tests/Makefile @@ -25,7 +25,7 @@ PYTHON_FILES=. MYPY_CACHE=.mypy_cache lint format: PYTHON_FILES=. 
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/standard-tests --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$') -lint_package: PYTHON_FILES=langchain_standard_tests +lint_package: PYTHON_FILES=langchain_tests lint_tests: PYTHON_FILES=tests lint_tests: MYPY_CACHE=.mypy_cache_test @@ -44,7 +44,7 @@ spell_check: spell_fix: poetry run codespell --toml pyproject.toml -w -check_imports: $(shell find langchain_standard_tests -name '*.py') +check_imports: $(shell find langchain_tests -name '*.py') poetry run python ./scripts/check_imports.py $^ ###################### diff --git a/libs/standard-tests/README.md b/libs/standard-tests/README.md index 0060203a1e2be..029a9bafb76da 100644 --- a/libs/standard-tests/README.md +++ b/libs/standard-tests/README.md @@ -40,7 +40,7 @@ from typing import Type import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.unit_tests import ChatModelUnitTests +from langchain_tests.unit_tests import ChatModelUnitTests from langchain_parrot_chain import ChatParrotChain @@ -60,7 +60,7 @@ from typing import Type import pytest from langchain_core.language_models import BaseChatModel -from langchain_standard_tests.integration_tests import ChatModelIntegrationTests +from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_parrot_chain import ChatParrotChain diff --git a/libs/standard-tests/langchain_standard_tests/__init__.py b/libs/standard-tests/langchain_tests/__init__.py similarity index 100% rename from libs/standard-tests/langchain_standard_tests/__init__.py rename to libs/standard-tests/langchain_tests/__init__.py diff --git a/libs/standard-tests/langchain_standard_tests/base.py b/libs/standard-tests/langchain_tests/base.py similarity index 96% rename from libs/standard-tests/langchain_standard_tests/base.py rename to libs/standard-tests/langchain_tests/base.py index e9f71802737ba..f2b7ca1f7e92d 100644 --- 
a/libs/standard-tests/langchain_standard_tests/base.py +++ b/libs/standard-tests/langchain_tests/base.py @@ -13,7 +13,7 @@ def test_no_overrides_DO_NOT_OVERRIDE(self) -> None: def explore_bases(cls: Type) -> None: nonlocal comparison_class for base in cls.__bases__: - if base.__module__.startswith("langchain_standard_tests."): + if base.__module__.startswith("langchain_tests."): if comparison_class is None: comparison_class = base else: diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/__init__.py b/libs/standard-tests/langchain_tests/integration_tests/__init__.py similarity index 90% rename from libs/standard-tests/langchain_standard_tests/integration_tests/__init__.py rename to libs/standard-tests/langchain_tests/integration_tests/__init__.py index ec26de72a4a7e..67ccb38f29e94 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/__init__.py +++ b/libs/standard-tests/langchain_tests/integration_tests/__init__.py @@ -14,9 +14,7 @@ ] for module in modules: - pytest.register_assert_rewrite( - f"langchain_standard_tests.integration_tests.{module}" - ) + pytest.register_assert_rewrite(f"langchain_tests.integration_tests.{module}") from .base_store import BaseStoreAsyncTests, BaseStoreSyncTests from .cache import AsyncCacheTestSuite, SyncCacheTestSuite diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/base_store.py b/libs/standard-tests/langchain_tests/integration_tests/base_store.py similarity index 99% rename from libs/standard-tests/langchain_standard_tests/integration_tests/base_store.py rename to libs/standard-tests/langchain_tests/integration_tests/base_store.py index e4b461d98227a..cc5fab8bcf7a4 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/base_store.py +++ b/libs/standard-tests/langchain_tests/integration_tests/base_store.py @@ -4,7 +4,7 @@ import pytest from langchain_core.stores import BaseStore -from langchain_standard_tests.base import BaseStandardTests 
+from langchain_tests.base import BaseStandardTests V = TypeVar("V") diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/cache.py b/libs/standard-tests/langchain_tests/integration_tests/cache.py similarity index 99% rename from libs/standard-tests/langchain_standard_tests/integration_tests/cache.py rename to libs/standard-tests/langchain_tests/integration_tests/cache.py index 7d1359f5154ec..7087da8ea07b3 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/cache.py +++ b/libs/standard-tests/langchain_tests/integration_tests/cache.py @@ -4,7 +4,7 @@ from langchain_core.caches import BaseCache from langchain_core.outputs import Generation -from langchain_standard_tests.base import BaseStandardTests +from langchain_tests.base import BaseStandardTests class SyncCacheTestSuite(BaseStandardTests): diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py similarity index 99% rename from libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py rename to libs/standard-tests/langchain_tests/integration_tests/chat_models.py index 9eea91aebbe44..f0ac13d461443 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py +++ b/libs/standard-tests/langchain_tests/integration_tests/chat_models.py @@ -21,11 +21,11 @@ from pydantic.v1 import BaseModel as BaseModelV1 from pydantic.v1 import Field as FieldV1 -from langchain_standard_tests.unit_tests.chat_models import ( +from langchain_tests.unit_tests.chat_models import ( ChatModelTests, my_adder_tool, ) -from langchain_standard_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION +from langchain_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION class MagicFunctionSchema(BaseModel): diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/embeddings.py 
b/libs/standard-tests/langchain_tests/integration_tests/embeddings.py similarity index 96% rename from libs/standard-tests/langchain_standard_tests/integration_tests/embeddings.py rename to libs/standard-tests/langchain_tests/integration_tests/embeddings.py index 1de2aece78023..7e3689d0f5429 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/embeddings.py +++ b/libs/standard-tests/langchain_tests/integration_tests/embeddings.py @@ -2,7 +2,7 @@ from langchain_core.embeddings import Embeddings -from langchain_standard_tests.unit_tests.embeddings import EmbeddingsTests +from langchain_tests.unit_tests.embeddings import EmbeddingsTests class EmbeddingsIntegrationTests(EmbeddingsTests): diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/indexer.py b/libs/standard-tests/langchain_tests/integration_tests/indexer.py similarity index 100% rename from libs/standard-tests/langchain_standard_tests/integration_tests/indexer.py rename to libs/standard-tests/langchain_tests/integration_tests/indexer.py diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/tools.py b/libs/standard-tests/langchain_tests/integration_tests/tools.py similarity index 97% rename from libs/standard-tests/langchain_standard_tests/integration_tests/tools.py rename to libs/standard-tests/langchain_tests/integration_tests/tools.py index ad854c3b1d41d..2609a87c84521 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/tools.py +++ b/libs/standard-tests/langchain_tests/integration_tests/tools.py @@ -1,7 +1,7 @@ from langchain_core.messages import ToolCall from langchain_core.tools import BaseTool -from langchain_standard_tests.unit_tests.tools import ToolsTests +from langchain_tests.unit_tests.tools import ToolsTests class ToolsIntegrationTests(ToolsTests): diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/vectorstores.py 
b/libs/standard-tests/langchain_tests/integration_tests/vectorstores.py similarity index 99% rename from libs/standard-tests/langchain_standard_tests/integration_tests/vectorstores.py rename to libs/standard-tests/langchain_tests/integration_tests/vectorstores.py index d7735cfdd2bcf..08b0358dcfb9e 100644 --- a/libs/standard-tests/langchain_standard_tests/integration_tests/vectorstores.py +++ b/libs/standard-tests/langchain_tests/integration_tests/vectorstores.py @@ -7,7 +7,7 @@ from langchain_core.embeddings.fake import DeterministicFakeEmbedding, Embeddings from langchain_core.vectorstores import VectorStore -from langchain_standard_tests.base import BaseStandardTests +from langchain_tests.base import BaseStandardTests # Arbitrarily chosen. Using a small embedding size # so tests are faster and easier to debug. diff --git a/libs/standard-tests/langchain_standard_tests/py.typed b/libs/standard-tests/langchain_tests/py.typed similarity index 100% rename from libs/standard-tests/langchain_standard_tests/py.typed rename to libs/standard-tests/langchain_tests/py.typed diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/__init__.py b/libs/standard-tests/langchain_tests/unit_tests/__init__.py similarity index 86% rename from libs/standard-tests/langchain_standard_tests/unit_tests/__init__.py rename to libs/standard-tests/langchain_tests/unit_tests/__init__.py index df94c79ae61cc..ee4a1700edaf1 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/__init__.py +++ b/libs/standard-tests/langchain_tests/unit_tests/__init__.py @@ -11,7 +11,7 @@ ] for module in modules: - pytest.register_assert_rewrite(f"langchain_standard_tests.unit_tests.{module}") + pytest.register_assert_rewrite(f"langchain_tests.unit_tests.{module}") from .chat_models import ChatModelUnitTests from .embeddings import EmbeddingsUnitTests diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/chat_models.py 
b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py similarity index 98% rename from libs/standard-tests/langchain_standard_tests/unit_tests/chat_models.py rename to libs/standard-tests/langchain_tests/unit_tests/chat_models.py index 9bde7fbf3e1a5..f5e5fe54f6e76 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/chat_models.py +++ b/libs/standard-tests/langchain_tests/unit_tests/chat_models.py @@ -22,8 +22,8 @@ ) from syrupy import SnapshotAssertion -from langchain_standard_tests.base import BaseStandardTests -from langchain_standard_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION +from langchain_tests.base import BaseStandardTests +from langchain_tests.utils.pydantic import PYDANTIC_MAJOR_VERSION class Person(BaseModel): # Used by some dependent tests. Should be deprecated. diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/embeddings.py b/libs/standard-tests/langchain_tests/unit_tests/embeddings.py similarity index 96% rename from libs/standard-tests/langchain_standard_tests/unit_tests/embeddings.py rename to libs/standard-tests/langchain_tests/unit_tests/embeddings.py index 39c6e941c5d7e..da7b78513844b 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/embeddings.py +++ b/libs/standard-tests/langchain_tests/unit_tests/embeddings.py @@ -7,7 +7,7 @@ from langchain_core.embeddings import Embeddings from pydantic import SecretStr -from langchain_standard_tests.base import BaseStandardTests +from langchain_tests.base import BaseStandardTests class EmbeddingsTests(BaseStandardTests): diff --git a/libs/standard-tests/langchain_standard_tests/unit_tests/tools.py b/libs/standard-tests/langchain_tests/unit_tests/tools.py similarity index 97% rename from libs/standard-tests/langchain_standard_tests/unit_tests/tools.py rename to libs/standard-tests/langchain_tests/unit_tests/tools.py index 0decc51230fbf..b92cb4f5263a4 100644 --- a/libs/standard-tests/langchain_standard_tests/unit_tests/tools.py +++ 
b/libs/standard-tests/langchain_tests/unit_tests/tools.py @@ -7,7 +7,7 @@ from langchain_core.tools import BaseTool from pydantic import SecretStr -from langchain_standard_tests.base import BaseStandardTests +from langchain_tests.base import BaseStandardTests class ToolsTests(BaseStandardTests): diff --git a/libs/standard-tests/langchain_standard_tests/utils/__init__.py b/libs/standard-tests/langchain_tests/utils/__init__.py similarity index 100% rename from libs/standard-tests/langchain_standard_tests/utils/__init__.py rename to libs/standard-tests/langchain_tests/utils/__init__.py diff --git a/libs/standard-tests/langchain_standard_tests/utils/pydantic.py b/libs/standard-tests/langchain_tests/utils/pydantic.py similarity index 100% rename from libs/standard-tests/langchain_standard_tests/utils/pydantic.py rename to libs/standard-tests/langchain_tests/utils/pydantic.py diff --git a/libs/standard-tests/pyproject.toml b/libs/standard-tests/pyproject.toml index 8b1098c000307..238d90b102906 100644 --- a/libs/standard-tests/pyproject.toml +++ b/libs/standard-tests/pyproject.toml @@ -4,8 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "langchain-tests" -packages = [{ include = "langchain_standard_tests" }] -version = "0.3.1" +version = "0.3.2" description = "Standard tests for LangChain implementations" authors = ["Erick Friis "] readme = "README.md" diff --git a/libs/standard-tests/tests/unit_tests/test_in_memory_base_store.py b/libs/standard-tests/tests/unit_tests/test_in_memory_base_store.py index 5171c14c16230..35ad6674bc5a1 100644 --- a/libs/standard-tests/tests/unit_tests/test_in_memory_base_store.py +++ b/libs/standard-tests/tests/unit_tests/test_in_memory_base_store.py @@ -5,7 +5,7 @@ import pytest from langchain_core.stores import InMemoryStore -from langchain_standard_tests.integration_tests.base_store import ( +from langchain_tests.integration_tests.base_store import ( BaseStoreAsyncTests, BaseStoreSyncTests, ) diff --git 
a/libs/standard-tests/tests/unit_tests/test_in_memory_cache.py b/libs/standard-tests/tests/unit_tests/test_in_memory_cache.py index 4f67a876490d0..6c1a1647ade05 100644 --- a/libs/standard-tests/tests/unit_tests/test_in_memory_cache.py +++ b/libs/standard-tests/tests/unit_tests/test_in_memory_cache.py @@ -1,7 +1,7 @@ import pytest from langchain_core.caches import InMemoryCache -from langchain_standard_tests.integration_tests.cache import ( +from langchain_tests.integration_tests.cache import ( AsyncCacheTestSuite, SyncCacheTestSuite, ) diff --git a/libs/standard-tests/tests/unit_tests/test_in_memory_vectorstore.py b/libs/standard-tests/tests/unit_tests/test_in_memory_vectorstore.py index d1becb40c9eb4..8a3bf5d0a32b2 100644 --- a/libs/standard-tests/tests/unit_tests/test_in_memory_vectorstore.py +++ b/libs/standard-tests/tests/unit_tests/test_in_memory_vectorstore.py @@ -4,7 +4,7 @@ VectorStore, ) -from langchain_standard_tests.integration_tests.vectorstores import ( +from langchain_tests.integration_tests.vectorstores import ( AsyncReadWriteTestSuite, ReadWriteTestSuite, )