From 18c4949fc3070aa673f10e48241dd9b8e84c52fe Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Wed, 16 Aug 2023 16:58:19 -0700 Subject: [PATCH 01/20] synthetic-data: adds simple synthetic data gen --- .../langchain/data_generation/__init__.py | 0 .../langchain/data_generation/prompts.py | 10 ++ .../langchain/data_generation/synthetic.py | 154 ++++++++++++++++++ .../data_generation/__init__.py | 0 .../data_generation/test_synthetic.py | 33 ++++ 5 files changed, 197 insertions(+) create mode 100644 libs/langchain/langchain/data_generation/__init__.py create mode 100644 libs/langchain/langchain/data_generation/prompts.py create mode 100644 libs/langchain/langchain/data_generation/synthetic.py create mode 100644 libs/langchain/tests/integration_tests/data_generation/__init__.py create mode 100644 libs/langchain/tests/integration_tests/data_generation/test_synthetic.py diff --git a/libs/langchain/langchain/data_generation/__init__.py b/libs/langchain/langchain/data_generation/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/libs/langchain/langchain/data_generation/prompts.py b/libs/langchain/langchain/data_generation/prompts.py new file mode 100644 index 0000000000000..6dccb32a04717 --- /dev/null +++ b/libs/langchain/langchain/data_generation/prompts.py @@ -0,0 +1,10 @@ +from libs.langchain.langchain.prompts.prompt import PromptTemplate + +EXAMPLE_PROMPT = PromptTemplate( + input_variables=["example"], template="example: {example}" +) + +SYNTHETIC_FEW_SHOT_PREFIX = ( + "This is a test about generating synthetic data about {subject}. 
Examples below:" +) +SYNTHETIC_FEW_SHOT_SUFFIX = "Now you try to generate synthetic data about {subject}:" diff --git a/libs/langchain/langchain/data_generation/synthetic.py b/libs/langchain/langchain/data_generation/synthetic.py new file mode 100644 index 0000000000000..5f024f39da96b --- /dev/null +++ b/libs/langchain/langchain/data_generation/synthetic.py @@ -0,0 +1,154 @@ +from typing import Dict, List + +from pydantic.class_validators import root_validator +import asyncio +from pydantic.main import BaseModel + +from langchain.chains.llm import LLMChain +from langchain.data_generation.prompts import ( + EXAMPLE_PROMPT, + SYNTHETIC_FEW_SHOT_PREFIX, + SYNTHETIC_FEW_SHOT_SUFFIX, +) +from langchain.llms import BaseLLM +from langchain.llms.openai import OpenAI +from langchain.prompts.few_shot import FewShotPromptTemplate +from langchain.prompts.prompt import PromptTemplate + + +class SyntheticDataGenerator(BaseModel): + """Generates synthetic data using the given LLM and few-shot template. + + Utilizes the provided LLM to produce synthetic data based on the + few-shot prompt template. Optionally, it evaluates the fitness of the + generated results using an evaluator function. + + Attributes: + template (FewShotPromptTemplate): Template for few-shot prompting. + runs (int): Number of runs for synthetic data generation. + llm (LLM): Large Language Model to use for generation. + llm_chain (LLMChain): LLM chain initialized with the LLM and few-shot template. 
+ """ + + template: FewShotPromptTemplate + llm: BaseLLM = OpenAI(temperature=1) + _llm_chain: LLMChain = None # Will be populated post-init + results: list = [] + + class Config: + validate_assignment = True + + @root_validator(pre=False, skip_on_failure=True) + def set_llm_chain(cls, values): + llm = values.get("llm") + few_shot_template = values.get("template") + + values["_llm_chain"] = LLMChain(llm=llm, prompt=few_shot_template) + + return values + + def generate(self, subject: str, runs: int) -> List[str]: + """Generate synthetic data using the given subject matter. + + Args: + subject (str): The subject the synthetic data will be about. + runs (int): Number of times to generate the data using the given subject. + + Returns: + List[str]: List of generated synthetic data. + """ + for _ in range(runs): + result = self._llm_chain.run(subject) + self.results.append(result) + return self.results + + async def agenerate(self, subject: str, runs: int) -> List[str]: + """Generate synthetic data using the given subject async. + + Args: + subject (str): The subject the synthetic data will be about. + runs (int): Number of times to generate the data using the given subject async. + + Returns: + List[str]: List of generated synthetic data for the given subject. + """ + + async def run_chain(subject): + result = await self._llm_chain.arun(subject) + self.results.append(result) + + await asyncio.gather(*(run_chain(subject) for _ in range(runs))) + return self.results + + +def generate_synthetic( + examples: List[Dict[str, str]], + subject: str, + llm=OpenAI(temperature=1), + prompt_template: PromptTemplate = EXAMPLE_PROMPT, + runs: int = 10, # default value +) -> List[str]: + """Generate synthetic examples based on the provided examples and subject matter. + + This function uses the LLM to produce synthetic examples based on the + provided examples and the given subject matter. 
The prompt used for the + synthetic generation is constructed based on the examples and the + predefined few-shot prefix and suffix. + + Args: + examples (List[Dict[str, str]]): List of examples to be used in the prompt. + subject (str): The subject the synthetic data will be about. + llm (LLM, optional): Large Language Model to use for generation. Defaults to OpenAI with temperature 1. + prompt_template (PromptTemplate, optional): Prompt template to use. Defaults to EXAMPLE_PROMPT. + runs (int, optional): Number of synthetic examples to generate. Defaults to 10. + + Returns: + List[str]: List of generated synthetic examples. + """ + + prompt = FewShotPromptTemplate( + prefix=SYNTHETIC_FEW_SHOT_PREFIX, + examples=examples, + suffix=SYNTHETIC_FEW_SHOT_SUFFIX, + input_variables=["subject"], + example_prompt=prompt_template, + ) + + generator = SyntheticDataGenerator(template=prompt, llm=llm) + return generator.generate(subject, runs) + + +async def agenerate_synthetic( + examples: List[Dict[str, str]], + subject: str, + llm=OpenAI(temperature=1), + prompt_template: PromptTemplate = EXAMPLE_PROMPT, + runs: int = 10, # default value +) -> List[str]: + """Generate synthetic examples based on the provided examples and the subject matter. + + This function uses the LLM to produce synthetic examples based on the + provided examples and the given subject matter. The prompt used for the + synthetic generation is constructed based on the examples and the + predefined few-shot prefix and suffix. + + Args: + examples (List[Dict[str, str]]): List of examples to be used in the prompt. + subject (str): The subject the synthetic data will be about. + llm (LLM, optional): Large Language Model to use for generation. Defaults to OpenAI with temperature 1. + prompt_template (PromptTemplate, optional): Prompt template to use. Defaults to EXAMPLE_PROMPT. + runs (int, optional): Number of synthetic examples to generate. Defaults to 10. 
+ + Returns: + List[str]: List of generated synthetic examples. + """ + + prompt = FewShotPromptTemplate( + prefix=SYNTHETIC_FEW_SHOT_PREFIX, + examples=examples, + suffix=SYNTHETIC_FEW_SHOT_SUFFIX, + input_variables=["subject"], + example_prompt=prompt_template, + ) + generator = SyntheticDataGenerator(template=prompt, llm=llm) + return await generator.agenerate(subject, runs) diff --git a/libs/langchain/tests/integration_tests/data_generation/__init__.py b/libs/langchain/tests/integration_tests/data_generation/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/libs/langchain/tests/integration_tests/data_generation/test_synthetic.py b/libs/langchain/tests/integration_tests/data_generation/test_synthetic.py new file mode 100644 index 0000000000000..e046d8e3a2033 --- /dev/null +++ b/libs/langchain/tests/integration_tests/data_generation/test_synthetic.py @@ -0,0 +1,33 @@ +import pytest as pytest +from langchain.data_generation.synthetic import generate_synthetic, agenerate_synthetic + +examples = [ + # all examples must be in format of dict with a key example -> value the example itself + { + "example": "Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount: $350" + }, + { + "example": "Patient ID: 789012, Patient Name: Jane Smith, Diagnosis Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim Amount: $120" + }, +] + + +@pytest.mark.requires("openai") +async def test_generate_synthetic(): + synthetic_results = generate_synthetic(examples, "medical_billing", runs=10) + assert len(synthetic_results) == 10 + for row in synthetic_results: + assert len(row) > 0 + assert isinstance(row, (str,)) + print(synthetic_results) + + +@pytest.mark.requires("openai") +@pytest.mark.asyncio +async def test_agenerate_synthetic(): + synthetic_results = await agenerate_synthetic(examples, "medical_billing", runs=10) + assert len(synthetic_results) == 10 + for 
row in synthetic_results: + assert len(row) > 0 + assert isinstance(row, (str,)) + print(synthetic_results) From d70f5735ec2cbafaef9610d0b2225e8f8b0d5395 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Thu, 17 Aug 2023 18:10:24 -0700 Subject: [PATCH 02/20] synthetic-data: adds fc --- .../langchain/data_generation/base.py | 80 +++++++++ .../langchain/data_generation/openai.py | 52 ++++++ .../langchain/data_generation/prompts.py | 11 +- .../langchain/data_generation/synthetic.py | 154 ------------------ .../data_generation/test_openai.py | 64 ++++++++ .../data_generation/test_synthetic.py | 33 ---- 6 files changed, 201 insertions(+), 193 deletions(-) create mode 100644 libs/langchain/langchain/data_generation/base.py create mode 100644 libs/langchain/langchain/data_generation/openai.py delete mode 100644 libs/langchain/langchain/data_generation/synthetic.py create mode 100644 libs/langchain/tests/integration_tests/data_generation/test_openai.py delete mode 100644 libs/langchain/tests/integration_tests/data_generation/test_synthetic.py diff --git a/libs/langchain/langchain/data_generation/base.py b/libs/langchain/langchain/data_generation/base.py new file mode 100644 index 0000000000000..a7a589f630926 --- /dev/null +++ b/libs/langchain/langchain/data_generation/base.py @@ -0,0 +1,80 @@ +import asyncio +from typing import List, Optional + +from pydantic.class_validators import root_validator +from pydantic.error_wrappers import ValidationError +from pydantic.main import BaseModel + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.prompts.few_shot import FewShotPromptTemplate +from langchain.schema.language_model import BaseLanguageModel + + +class SyntheticDataGenerator(BaseModel): + """Generates synthetic data using the given LLM and few-shot template. + + Utilizes the provided LLM to produce synthetic data based on the + few-shot prompt template. 
Optionally, it evaluates the fitness of the + generated results using an evaluator function. + + Attributes: + template (FewShotPromptTemplate): Template for few-shot prompting. + runs (int): Number of runs for synthetic data generation. + llm (LLM): Large Language Model to use for generation. + llm_chain (LLMChain): LLM chain initialized with the LLM and few-shot template. + """ + + template: FewShotPromptTemplate + llm: Optional[BaseLanguageModel] = None + results: list = [] + llm_chain: Optional[Chain] = None + + class Config: + validate_assignment = True + + @root_validator(pre=False, skip_on_failure=True) + def set_llm_chain(cls, values): + llm_chain = values.get("llm_chain") + llm = values.get("llm") + few_shot_template = values.get("template") + + if not llm_chain: # If llm_chain is None or not present + if llm is None or few_shot_template is None: + raise ValidationError("Both llm and few_shot_template must be provided if llm_chain is not given.") + values["llm_chain"] = LLMChain(llm=llm, prompt=few_shot_template) + + return values + + def generate(self, subject: str, runs: int, extra: str = "") -> List[str]: + """Generate synthetic data using the given subject string. + + Args: + subject (str): The subject the synthetic data will be about. + runs (int): Number of times to generate the data using the given subject. + + Returns: + List[str]: List of generated synthetic data. + """ + for _ in range(runs): + result = self.llm_chain.run(subject, extra) + self.results.append(result) + return self.results + + async def agenerate(self, subject: str, runs: int, extra: str = "") -> List[str]: + """Generate synthetic data using the given subject async. + + Args: + subject (str): The subject the synthetic data will be about. + runs (int): Number of times to generate the data using the given subject async. + + Returns: + List[str]: List of generated synthetic data for the given subject. 
+ """ + + async def run_chain(subject: str, extra: str): + result = await self.llm_chain.arun(subject, extra) + self.results.append(result) + + await asyncio.gather(*(run_chain(subject) for _ in range(runs))) + return self.results diff --git a/libs/langchain/langchain/data_generation/openai.py b/libs/langchain/langchain/data_generation/openai.py new file mode 100644 index 0000000000000..aabf3ac218c7f --- /dev/null +++ b/libs/langchain/langchain/data_generation/openai.py @@ -0,0 +1,52 @@ +from typing import Optional, Any, Dict, Type, Union + +from pydantic.main import BaseModel + +from langchain import BasePromptTemplate, PromptTemplate +from langchain.chains.openai_functions import create_structured_output_chain +from langchain.data_generation.base import SyntheticDataGenerator +from langchain.schema import BaseLLMOutputParser +from langchain.schema.language_model import BaseLanguageModel + +OPENAI_TEMPLATE = PromptTemplate( + input_variables=["example"], template="{example}" +) + + +def create_openai_data_generator( + output_schema: Union[Dict[str, Any], Type[BaseModel]], + llm: BaseLanguageModel, + prompt: BasePromptTemplate, + output_parser: Optional[BaseLLMOutputParser] = None, + **kwargs: Any +) -> SyntheticDataGenerator: + """ + Create an instance of SyntheticDataGenerator tailored for OpenAI models. + + This function creates an LLM chain designed for structured output based on the provided schema, + language model, and prompt template. The resulting chain is then used to instantiate and return + a SyntheticDataGenerator. + + Args: + output_schema (Union[Dict[str, Any], Type[BaseModel]]): Schema for expected output. This can be either + a dictionary representing a valid JsonSchema or a Pydantic BaseModel class. + llm (BaseLanguageModel): Language model to use. Should support the OpenAI function-calling API. + prompt (BasePromptTemplate): Template to be used for generating prompts. 
+ output_parser (Optional[BaseLLMOutputParser], optional): Parser for processing model outputs. If none + is provided, a default will be inferred from the function types. + **kwargs: Additional keyword arguments to be passed to `create_structured_output_chain`. + + Returns: + SyntheticDataGenerator: An instance of the data generator set up with the constructed chain. + + Usage: + To generate synthetic data with a structured output, first define your desired output schema. Then, + use this function to create a SyntheticDataGenerator instance. After obtaining the generator, you + can utilize its methods to produce the desired synthetic data. + """ + # Create function calling chain to ensure structured output + chain = create_structured_output_chain(output_schema, llm, prompt, output_parser=output_parser, **kwargs) + + # Create the SyntheticDataGenerator instance with the created chain + generator = SyntheticDataGenerator(template=prompt, llm_chain=chain) + return generator diff --git a/libs/langchain/langchain/data_generation/prompts.py b/libs/langchain/langchain/data_generation/prompts.py index 6dccb32a04717..98ef5fe8004da 100644 --- a/libs/langchain/langchain/data_generation/prompts.py +++ b/libs/langchain/langchain/data_generation/prompts.py @@ -1,10 +1,9 @@ from libs.langchain.langchain.prompts.prompt import PromptTemplate -EXAMPLE_PROMPT = PromptTemplate( - input_variables=["example"], template="example: {example}" +DEFAULT_PROMPT = PromptTemplate( + input_variables=["example"], template="{example}" ) -SYNTHETIC_FEW_SHOT_PREFIX = ( - "This is a test about generating synthetic data about {subject}. Examples below:" -) -SYNTHETIC_FEW_SHOT_SUFFIX = "Now you try to generate synthetic data about {subject}:" +SYNTHETIC_FEW_SHOT_PREFIX = "This is a test about generating synthetic data about {subject}. {extra}. Examples below:" +SYNTHETIC_FEW_SHOT_SUFFIX = """Now you generate synthetic data about {subject}. 
Make sure that each synthetic you + "generate is different from the others, but based on the examples above:""" diff --git a/libs/langchain/langchain/data_generation/synthetic.py b/libs/langchain/langchain/data_generation/synthetic.py deleted file mode 100644 index 5f024f39da96b..0000000000000 --- a/libs/langchain/langchain/data_generation/synthetic.py +++ /dev/null @@ -1,154 +0,0 @@ -from typing import Dict, List - -from pydantic.class_validators import root_validator -import asyncio -from pydantic.main import BaseModel - -from langchain.chains.llm import LLMChain -from langchain.data_generation.prompts import ( - EXAMPLE_PROMPT, - SYNTHETIC_FEW_SHOT_PREFIX, - SYNTHETIC_FEW_SHOT_SUFFIX, -) -from langchain.llms import BaseLLM -from langchain.llms.openai import OpenAI -from langchain.prompts.few_shot import FewShotPromptTemplate -from langchain.prompts.prompt import PromptTemplate - - -class SyntheticDataGenerator(BaseModel): - """Generates synthetic data using the given LLM and few-shot template. - - Utilizes the provided LLM to produce synthetic data based on the - few-shot prompt template. Optionally, it evaluates the fitness of the - generated results using an evaluator function. - - Attributes: - template (FewShotPromptTemplate): Template for few-shot prompting. - runs (int): Number of runs for synthetic data generation. - llm (LLM): Large Language Model to use for generation. - llm_chain (LLMChain): LLM chain initialized with the LLM and few-shot template. 
- """ - - template: FewShotPromptTemplate - llm: BaseLLM = OpenAI(temperature=1) - _llm_chain: LLMChain = None # Will be populated post-init - results: list = [] - - class Config: - validate_assignment = True - - @root_validator(pre=False, skip_on_failure=True) - def set_llm_chain(cls, values): - llm = values.get("llm") - few_shot_template = values.get("template") - - values["_llm_chain"] = LLMChain(llm=llm, prompt=few_shot_template) - - return values - - def generate(self, subject: str, runs: int) -> List[str]: - """Generate synthetic data using the given subject matter. - - Args: - subject (str): The subject the synthetic data will be about. - runs (int): Number of times to generate the data using the given subject. - - Returns: - List[str]: List of generated synthetic data. - """ - for _ in range(runs): - result = self._llm_chain.run(subject) - self.results.append(result) - return self.results - - async def agenerate(self, subject: str, runs: int) -> List[str]: - """Generate synthetic data using the given subject async. - - Args: - subject (str): The subject the synthetic data will be about. - runs (int): Number of times to generate the data using the given subject async. - - Returns: - List[str]: List of generated synthetic data for the given subject. - """ - - async def run_chain(subject): - result = await self._llm_chain.arun(subject) - self.results.append(result) - - await asyncio.gather(*(run_chain(subject) for _ in range(runs))) - return self.results - - -def generate_synthetic( - examples: List[Dict[str, str]], - subject: str, - llm=OpenAI(temperature=1), - prompt_template: PromptTemplate = EXAMPLE_PROMPT, - runs: int = 10, # default value -) -> List[str]: - """Generate synthetic examples based on the provided examples and subject matter. - - This function uses the LLM to produce synthetic examples based on the - provided examples and the given subject matter. 
The prompt used for the - synthetic generation is constructed based on the examples and the - predefined few-shot prefix and suffix. - - Args: - examples (List[Dict[str, str]]): List of examples to be used in the prompt. - subject (str): The subject the synthetic data will be about. - llm (LLM, optional): Large Language Model to use for generation. Defaults to OpenAI with temperature 1. - prompt_template (PromptTemplate, optional): Prompt template to use. Defaults to EXAMPLE_PROMPT. - runs (int, optional): Number of synthetic examples to generate. Defaults to 10. - - Returns: - List[str]: List of generated synthetic examples. - """ - - prompt = FewShotPromptTemplate( - prefix=SYNTHETIC_FEW_SHOT_PREFIX, - examples=examples, - suffix=SYNTHETIC_FEW_SHOT_SUFFIX, - input_variables=["subject"], - example_prompt=prompt_template, - ) - - generator = SyntheticDataGenerator(template=prompt, llm=llm) - return generator.generate(subject, runs) - - -async def agenerate_synthetic( - examples: List[Dict[str, str]], - subject: str, - llm=OpenAI(temperature=1), - prompt_template: PromptTemplate = EXAMPLE_PROMPT, - runs: int = 10, # default value -) -> List[str]: - """Generate synthetic examples based on the provided examples and the subject matter. - - This function uses the LLM to produce synthetic examples based on the - provided examples and the given subject matter. The prompt used for the - synthetic generation is constructed based on the examples and the - predefined few-shot prefix and suffix. - - Args: - examples (List[Dict[str, str]]): List of examples to be used in the prompt. - subject (str): The subject the synthetic data will be about. - llm (LLM, optional): Large Language Model to use for generation. Defaults to OpenAI with temperature 1. - prompt_template (PromptTemplate, optional): Prompt template to use. Defaults to EXAMPLE_PROMPT. - runs (int, optional): Number of synthetic examples to generate. Defaults to 10. 
- - Returns: - List[str]: List of generated synthetic examples. - """ - - prompt = FewShotPromptTemplate( - prefix=SYNTHETIC_FEW_SHOT_PREFIX, - examples=examples, - suffix=SYNTHETIC_FEW_SHOT_SUFFIX, - input_variables=["subject"], - example_prompt=prompt_template, - ) - generator = SyntheticDataGenerator(template=prompt, llm=llm) - return await generator.agenerate(subject, runs) diff --git a/libs/langchain/tests/integration_tests/data_generation/test_openai.py b/libs/langchain/tests/integration_tests/data_generation/test_openai.py new file mode 100644 index 0000000000000..e12a23f8c2d7a --- /dev/null +++ b/libs/langchain/tests/integration_tests/data_generation/test_openai.py @@ -0,0 +1,64 @@ +import examples as examples +import pytest +from pydantic.types import conlist + +from langchain import FewShotPromptTemplate +from langchain.chat_models import ChatOpenAI +from langchain.data_generation.base import SyntheticDataGenerator +from langchain.data_generation.openai import create_openai_data_generator, OPENAI_TEMPLATE +from langchain.data_generation.prompts import SYNTHETIC_FEW_SHOT_PREFIX, SYNTHETIC_FEW_SHOT_SUFFIX, DEFAULT_PROMPT +from pydantic import BaseModel + + +# Define the desired output schema for individual medical billing record +class MedicalBilling(BaseModel): + patient_id: int + patient_name: str + diagnosis_code: str + procedure_code: str + total_charge: float + insurance_claim_amount: float + + +examples = [ + { + "example": "Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount: $350"}, + { + "example": "Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim Amount: $120"}, +] + +prompt_template = FewShotPromptTemplate( + prefix=SYNTHETIC_FEW_SHOT_PREFIX, + examples=examples, + suffix=SYNTHETIC_FEW_SHOT_SUFFIX, + input_variables=["subject", "extra"], + example_prompt=OPENAI_TEMPLATE, +) + + 
+@pytest.fixture(scope="function") +def synthetic_data_generator(): + return create_openai_data_generator( + output_schema=MedicalBilling, + llm=ChatOpenAI(temperature=1), # replace with your LLM instance + prompt=prompt_template + ) + + +@pytest.mark.requires("openai") +def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator): + synthetic_results = synthetic_data_generator.generate("medical_billing", runs=10) + assert len(synthetic_results) == 10 + for row in synthetic_results: + assert isinstance(row, MedicalBilling) + print(synthetic_results) + + +@pytest.mark.requires("openai") +@pytest.mark.asyncio +async def test_agenerate_synthetic(synthetic_data_generator: SyntheticDataGenerator): + synthetic_results = await synthetic_data_generator.agenerate("medical_billing", runs=10) + assert len(synthetic_results) == 10 + for row in synthetic_results: + assert isinstance(row, MedicalBilling) + print(synthetic_results) diff --git a/libs/langchain/tests/integration_tests/data_generation/test_synthetic.py b/libs/langchain/tests/integration_tests/data_generation/test_synthetic.py deleted file mode 100644 index e046d8e3a2033..0000000000000 --- a/libs/langchain/tests/integration_tests/data_generation/test_synthetic.py +++ /dev/null @@ -1,33 +0,0 @@ -import pytest as pytest -from langchain.data_generation.synthetic import generate_synthetic, agenerate_synthetic - -examples = [ - # all examples must be in format of dict with a key example -> value the example itself - { - "example": "Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount: $350" - }, - { - "example": "Patient ID: 789012, Patient Name: Jane Smith, Diagnosis Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim Amount: $120" - }, -] - - -@pytest.mark.requires("openai") -async def test_generate_synthetic(): - synthetic_results = generate_synthetic(examples, "medical_billing", runs=10) - assert 
len(synthetic_results) == 10 - for row in synthetic_results: - assert len(row) > 0 - assert isinstance(row, (str,)) - print(synthetic_results) - - -@pytest.mark.requires("openai") -@pytest.mark.asyncio -async def test_agenerate_synthetic(): - synthetic_results = await agenerate_synthetic(examples, "medical_billing", runs=10) - assert len(synthetic_results) == 10 - for row in synthetic_results: - assert len(row) > 0 - assert isinstance(row, (str,)) - print(synthetic_results) From dc9870dad8483969c73ba4489844a277cdcbe4aa Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Fri, 18 Aug 2023 12:11:44 -0700 Subject: [PATCH 03/20] synthetic-data: updates --- .../langchain/data_generation/base.py | 12 +++---- .../langchain/data_generation/prompts.py | 5 ++- .../data_generation/test_openai.py | 31 ++++++++++++++++--- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/libs/langchain/langchain/data_generation/base.py b/libs/langchain/langchain/data_generation/base.py index a7a589f630926..162f4353d951c 100644 --- a/libs/langchain/langchain/data_generation/base.py +++ b/libs/langchain/langchain/data_generation/base.py @@ -46,7 +46,7 @@ def set_llm_chain(cls, values): return values - def generate(self, subject: str, runs: int, extra: str = "") -> List[str]: + def generate(self, subject: str, runs: int, **kwargs) -> List[str]: """Generate synthetic data using the given subject string. Args: @@ -57,11 +57,11 @@ def generate(self, subject: str, runs: int, extra: str = "") -> List[str]: List[str]: List of generated synthetic data. """ for _ in range(runs): - result = self.llm_chain.run(subject, extra) + result = self.llm_chain.run(subject=subject, **kwargs) self.results.append(result) return self.results - async def agenerate(self, subject: str, runs: int, extra: str = "") -> List[str]: + async def agenerate(self, subject: str, runs: int, **kwargs) -> List[str]: """Generate synthetic data using the given subject async. 
Args: @@ -72,9 +72,9 @@ async def agenerate(self, subject: str, runs: int, extra: str = "") -> List[str] List[str]: List of generated synthetic data for the given subject. """ - async def run_chain(subject: str, extra: str): - result = await self.llm_chain.arun(subject, extra) + async def run_chain(subject: str, **kwargs): + result = await self.llm_chain.arun(subject=subject, **kwargs) self.results.append(result) - await asyncio.gather(*(run_chain(subject) for _ in range(runs))) + await asyncio.gather(*(run_chain(subject=subject, **kwargs) for _ in range(runs))) return self.results diff --git a/libs/langchain/langchain/data_generation/prompts.py b/libs/langchain/langchain/data_generation/prompts.py index 98ef5fe8004da..c05abca35fbd3 100644 --- a/libs/langchain/langchain/data_generation/prompts.py +++ b/libs/langchain/langchain/data_generation/prompts.py @@ -4,6 +4,5 @@ input_variables=["example"], template="{example}" ) -SYNTHETIC_FEW_SHOT_PREFIX = "This is a test about generating synthetic data about {subject}. {extra}. Examples below:" -SYNTHETIC_FEW_SHOT_SUFFIX = """Now you generate synthetic data about {subject}. Make sure that each synthetic you - "generate is different from the others, but based on the examples above:""" +SYNTHETIC_FEW_SHOT_PREFIX = "This is a test about generating synthetic data about {subject}. Examples below:" +SYNTHETIC_FEW_SHOT_SUFFIX = """Now you generate synthetic data about {subject}. 
Make sure that {extra}:""" diff --git a/libs/langchain/tests/integration_tests/data_generation/test_openai.py b/libs/langchain/tests/integration_tests/data_generation/test_openai.py index e12a23f8c2d7a..c1c3f1f862d26 100644 --- a/libs/langchain/tests/integration_tests/data_generation/test_openai.py +++ b/libs/langchain/tests/integration_tests/data_generation/test_openai.py @@ -22,9 +22,29 @@ class MedicalBilling(BaseModel): examples = [ { - "example": "Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount: $350"}, + "example": """Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: J20.9, Procedure Code: 99203, + Total Charge: $500, Insurance Claim Amount: $350""" + }, { - "example": "Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim Amount: $120"}, + "example": """Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis Code: M54.5, Procedure Code: 99213, + Total Charge: $150, Insurance Claim Amount: $120""" + }, + { + "example": """Patient ID: 345678, Patient Name: Emily Stone, Diagnosis Code: E11.9, Procedure Code: 99214, + Total Charge: $300, Insurance Claim Amount: $250""" + }, + { + "example": """Patient ID: 901234, Patient Name: Robert Miles, Diagnosis Code: B07.9, Procedure Code: 99204, + Total Charge: $200, Insurance Claim Amount: $160""" + }, + { + "example": """Patient ID: 567890, Patient Name: Clara Jensen, Diagnosis Code: F41.9, Procedure Code: 99205, + Total Charge: $450, Insurance Claim Amount: $310""" + }, + { + "example": """Patient ID: 234567, Patient Name: Alan Turing, Diagnosis Code: G40.909, Procedure Code: 99215, + Total Charge: $220, Insurance Claim Amount: $180""" + } ] prompt_template = FewShotPromptTemplate( @@ -47,7 +67,8 @@ def synthetic_data_generator(): @pytest.mark.requires("openai") def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator): - 
synthetic_results = synthetic_data_generator.generate("medical_billing", runs=10) + synthetic_results = synthetic_data_generator.generate(subject="medical_billing", + extra="make sure the names are different", runs=10) assert len(synthetic_results) == 10 for row in synthetic_results: assert isinstance(row, MedicalBilling) @@ -57,7 +78,9 @@ def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator): @pytest.mark.requires("openai") @pytest.mark.asyncio async def test_agenerate_synthetic(synthetic_data_generator: SyntheticDataGenerator): - synthetic_results = await synthetic_data_generator.agenerate("medical_billing", runs=10) + synthetic_results = await synthetic_data_generator.agenerate(subject="medical_billing", + extra="Each value is different than the one before it.", + runs=10) assert len(synthetic_results) == 10 for row in synthetic_results: assert isinstance(row, MedicalBilling) From bef7f0478df5474b9c1b7845fcbb24fb87a908a3 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Thu, 24 Aug 2023 14:57:07 -0400 Subject: [PATCH 04/20] synthetic-data: adds docstring useage examples, improves steerability of data gen, and likelihood of duplicates --- .../{ => chains}/data_generation/__init__.py | 0 .../langchain/chains/data_generation/base.py | 117 ++++++++++++++++++ .../{ => chains}/data_generation/openai.py | 2 +- .../{ => chains}/data_generation/prompts.py | 7 +- .../langchain/data_generation/base.py | 80 ------------ libs/langchain/langchain/prompts/base.py | 2 +- .../{ => chains}/data_generation/__init__.py | 0 .../data_generation/test_openai.py | 16 +-- 8 files changed, 133 insertions(+), 91 deletions(-) rename libs/langchain/langchain/{ => chains}/data_generation/__init__.py (100%) create mode 100644 libs/langchain/langchain/chains/data_generation/base.py rename libs/langchain/langchain/{ => chains}/data_generation/openai.py (97%) rename libs/langchain/langchain/{ => chains}/data_generation/prompts.py (67%) delete mode 100644 
libs/langchain/langchain/data_generation/base.py rename libs/langchain/tests/integration_tests/{ => chains}/data_generation/__init__.py (100%) rename libs/langchain/tests/integration_tests/{ => chains}/data_generation/test_openai.py (79%) diff --git a/libs/langchain/langchain/data_generation/__init__.py b/libs/langchain/langchain/chains/data_generation/__init__.py similarity index 100% rename from libs/langchain/langchain/data_generation/__init__.py rename to libs/langchain/langchain/chains/data_generation/__init__.py diff --git a/libs/langchain/langchain/chains/data_generation/base.py b/libs/langchain/langchain/chains/data_generation/base.py new file mode 100644 index 0000000000000..d16f578a1849b --- /dev/null +++ b/libs/langchain/langchain/chains/data_generation/base.py @@ -0,0 +1,117 @@ +import asyncio +from typing import List, Optional + +from pydantic.class_validators import root_validator +from pydantic.error_wrappers import ValidationError +from pydantic.main import BaseModel + +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.prompts.few_shot import FewShotPromptTemplate +from langchain.schema.language_model import BaseLanguageModel + + +class SyntheticDataGenerator(BaseModel): + """ + Generates synthetic data using the given LLM and few-shot template. + + Utilizes the provided LLM to produce synthetic data based on the + few-shot prompt template. + + Attributes: + template (FewShotPromptTemplate): Template for few-shot prompting. + llm (Optional[BaseLanguageModel]): Large Language Model to use for generation. + llm_chain (Optional[Chain]): LLM chain initialized with the LLM and few-shot template. + example_input_key (str): Key to use for storing example inputs. + + Usage Example: + >>> template = FewShotPromptTemplate(...) + >>> llm = BaseLanguageModel(...) 
+ >>> generator = SyntheticDataGenerator(template=template, llm=llm) + >>> results = generator.generate(subject="climate change", runs=5) + """ + + template: FewShotPromptTemplate + llm: Optional[BaseLanguageModel] = None + results: list = [] + llm_chain: Optional[Chain] = None + example_input_key: str = "example" + + class Config: + validate_assignment = True + + @root_validator(pre=False, skip_on_failure=True) + def set_llm_chain(cls, values): + llm_chain = values.get("llm_chain") + llm = values.get("llm") + few_shot_template = values.get("template") + + if not llm_chain: # If llm_chain is None or not present + if llm is None or few_shot_template is None: + raise ValidationError("Both llm and few_shot_template must be provided if llm_chain is not given.") + values["llm_chain"] = LLMChain(llm=llm, prompt=few_shot_template) + + return values + + @staticmethod + def _format_dict_to_string(input_dict: dict) -> str: + formatted_str = ', '.join([f"{key}: {value}" for key, value in input_dict.items()]) + return formatted_str + + def _update_examples(self, example): + """Prevents duplicates by adding previously generated examples to the few shot list""" + if isinstance(example, BaseModel): + formatted_example = self._format_dict_to_string(example.dict()) + elif isinstance(example, dict): + formatted_example = self._format_dict_to_string(example) + else: + formatted_example = str(example) + self.template.examples.pop(0) + self.template.examples.append({self.example_input_key: formatted_example}) + + def generate(self, subject: str, runs: int, **kwargs) -> List[str]: + """ + Generate synthetic data using the given subject string. + + Args: + subject (str): The subject the synthetic data will be about. + runs (int): Number of times to generate the data. + extra (str): Extra instructions for steerability in data generation. + + Returns: + List[str]: List of generated synthetic data. 
+ + Usage Example: + >>> results = generator.generate(subject="climate change", runs=5, extra="Focus on environmental impacts.") + """ + for _ in range(runs): + result = self.llm_chain.run(subject=subject, **kwargs) + self.results.append(result) + self._update_examples(result) + return self.results + + async def agenerate(self, subject: str, runs: int, extra: str = "", **kwargs) -> List[str]: + """ + Generate synthetic data using the given subject asynchronously. + + Note: Since the LLM calls run concurrently, you may have fewer duplicates by adding specific instructions to + the "extra" keyword argument. + + Args: + subject (str): The subject the synthetic data will be about. + runs (int): Number of times to generate the data asynchronously. + extra (str): Extra instructions for steerability in data generation. + + Returns: + List[str]: List of generated synthetic data for the given subject. + + Usage Example: + >>> results = await generator.agenerate(subject="climate change", runs=5, extra="Focus on env impacts.") + """ + + async def run_chain(subject: str, extra: str = "", **kwargs): + result = await self.llm_chain.arun(subject=subject, extra=extra, **kwargs) + self.results.append(result) + + await asyncio.gather(*(run_chain(subject=subject, extra=extra, **kwargs) for _ in range(runs))) + return self.results diff --git a/libs/langchain/langchain/data_generation/openai.py b/libs/langchain/langchain/chains/data_generation/openai.py similarity index 97% rename from libs/langchain/langchain/data_generation/openai.py rename to libs/langchain/langchain/chains/data_generation/openai.py index aabf3ac218c7f..a132bdab67d27 100644 --- a/libs/langchain/langchain/data_generation/openai.py +++ b/libs/langchain/langchain/chains/data_generation/openai.py @@ -4,7 +4,7 @@ from langchain import BasePromptTemplate, PromptTemplate from langchain.chains.openai_functions import create_structured_output_chain -from langchain.data_generation.base import SyntheticDataGenerator +from 
langchain.chains.data_generation.base import SyntheticDataGenerator from langchain.schema import BaseLLMOutputParser from langchain.schema.language_model import BaseLanguageModel diff --git a/libs/langchain/langchain/data_generation/prompts.py b/libs/langchain/langchain/chains/data_generation/prompts.py similarity index 67% rename from libs/langchain/langchain/data_generation/prompts.py rename to libs/langchain/langchain/chains/data_generation/prompts.py index c05abca35fbd3..791e346f201c4 100644 --- a/libs/langchain/langchain/data_generation/prompts.py +++ b/libs/langchain/langchain/chains/data_generation/prompts.py @@ -1,8 +1,11 @@ from libs.langchain.langchain.prompts.prompt import PromptTemplate +DEFAULT_INPUT_KEY = "example" DEFAULT_PROMPT = PromptTemplate( - input_variables=["example"], template="{example}" + input_variables=[DEFAULT_INPUT_KEY], template="{example}" ) SYNTHETIC_FEW_SHOT_PREFIX = "This is a test about generating synthetic data about {subject}. Examples below:" -SYNTHETIC_FEW_SHOT_SUFFIX = """Now you generate synthetic data about {subject}. Make sure that {extra}:""" +SYNTHETIC_FEW_SHOT_SUFFIX = """Now you generate synthetic data about {subject}. Make sure to {extra}:""" + + diff --git a/libs/langchain/langchain/data_generation/base.py b/libs/langchain/langchain/data_generation/base.py deleted file mode 100644 index 162f4353d951c..0000000000000 --- a/libs/langchain/langchain/data_generation/base.py +++ /dev/null @@ -1,80 +0,0 @@ -import asyncio -from typing import List, Optional - -from pydantic.class_validators import root_validator -from pydantic.error_wrappers import ValidationError -from pydantic.main import BaseModel - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain.prompts.few_shot import FewShotPromptTemplate -from langchain.schema.language_model import BaseLanguageModel - - -class SyntheticDataGenerator(BaseModel): - """Generates synthetic data using the given LLM and few-shot template. 
- - Utilizes the provided LLM to produce synthetic data based on the - few-shot prompt template. Optionally, it evaluates the fitness of the - generated results using an evaluator function. - - Attributes: - template (FewShotPromptTemplate): Template for few-shot prompting. - runs (int): Number of runs for synthetic data generation. - llm (LLM): Large Language Model to use for generation. - llm_chain (LLMChain): LLM chain initialized with the LLM and few-shot template. - """ - - template: FewShotPromptTemplate - llm: Optional[BaseLanguageModel] = None - results: list = [] - llm_chain: Optional[Chain] = None - - class Config: - validate_assignment = True - - @root_validator(pre=False, skip_on_failure=True) - def set_llm_chain(cls, values): - llm_chain = values.get("llm_chain") - llm = values.get("llm") - few_shot_template = values.get("template") - - if not llm_chain: # If llm_chain is None or not present - if llm is None or few_shot_template is None: - raise ValidationError("Both llm and few_shot_template must be provided if llm_chain is not given.") - values["llm_chain"] = LLMChain(llm=llm, prompt=few_shot_template) - - return values - - def generate(self, subject: str, runs: int, **kwargs) -> List[str]: - """Generate synthetic data using the given subject string. - - Args: - subject (str): The subject the synthetic data will be about. - runs (int): Number of times to generate the data using the given subject. - - Returns: - List[str]: List of generated synthetic data. - """ - for _ in range(runs): - result = self.llm_chain.run(subject=subject, **kwargs) - self.results.append(result) - return self.results - - async def agenerate(self, subject: str, runs: int, **kwargs) -> List[str]: - """Generate synthetic data using the given subject async. - - Args: - subject (str): The subject the synthetic data will be about. - runs (int): Number of times to generate the data using the given subject async. 
- - Returns: - List[str]: List of generated synthetic data for the given subject. - """ - - async def run_chain(subject: str, **kwargs): - result = await self.llm_chain.arun(subject=subject, **kwargs) - self.results.append(result) - - await asyncio.gather(*(run_chain(subject=subject, **kwargs) for _ in range(runs))) - return self.results diff --git a/libs/langchain/langchain/prompts/base.py b/libs/langchain/langchain/prompts/base.py index 95d83256bfb89..96629b1451408 100644 --- a/libs/langchain/langchain/prompts/base.py +++ b/libs/langchain/langchain/prompts/base.py @@ -27,7 +27,7 @@ def jinja2_formatter(template: str, **kwargs: Any) -> str: def validate_jinja2(template: str, input_variables: List[str]) -> None: """ Validate that the input variables are valid for the template. - Issues an warning if missing or extra variables are found. + Issues a warning if missing or extra variables are found. Args: template: The template string. diff --git a/libs/langchain/tests/integration_tests/data_generation/__init__.py b/libs/langchain/tests/integration_tests/chains/data_generation/__init__.py similarity index 100% rename from libs/langchain/tests/integration_tests/data_generation/__init__.py rename to libs/langchain/tests/integration_tests/chains/data_generation/__init__.py diff --git a/libs/langchain/tests/integration_tests/data_generation/test_openai.py b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py similarity index 79% rename from libs/langchain/tests/integration_tests/data_generation/test_openai.py rename to libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py index c1c3f1f862d26..225e91ab2b8fa 100644 --- a/libs/langchain/tests/integration_tests/data_generation/test_openai.py +++ b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py @@ -1,12 +1,10 @@ -import examples as examples import pytest -from pydantic.types import conlist from langchain import FewShotPromptTemplate from 
langchain.chat_models import ChatOpenAI -from langchain.data_generation.base import SyntheticDataGenerator -from langchain.data_generation.openai import create_openai_data_generator, OPENAI_TEMPLATE -from langchain.data_generation.prompts import SYNTHETIC_FEW_SHOT_PREFIX, SYNTHETIC_FEW_SHOT_SUFFIX, DEFAULT_PROMPT +from langchain.chains.data_generation.base import SyntheticDataGenerator +from langchain.chains.data_generation.openai import create_openai_data_generator, OPENAI_TEMPLATE +from langchain.chains.data_generation.prompts import SYNTHETIC_FEW_SHOT_PREFIX, SYNTHETIC_FEW_SHOT_SUFFIX from pydantic import BaseModel @@ -68,7 +66,10 @@ def synthetic_data_generator(): @pytest.mark.requires("openai") def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator): synthetic_results = synthetic_data_generator.generate(subject="medical_billing", - extra="make sure the names are different", runs=10) + extra="""the name must be chosen at random. Make it + something you wouldn't normally choose. The CPT + codes must make sense with the ICD-10 code""", + runs=10) assert len(synthetic_results) == 10 for row in synthetic_results: assert isinstance(row, MedicalBilling) @@ -79,7 +80,8 @@ def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator): @pytest.mark.asyncio async def test_agenerate_synthetic(synthetic_data_generator: SyntheticDataGenerator): synthetic_results = await synthetic_data_generator.agenerate(subject="medical_billing", - extra="Each value is different than the one before it.", + extra="""the name must be chosen at random. 
Make it + something you wouldn't normally choose.""", runs=10) assert len(synthetic_results) == 10 for row in synthetic_results: From 86430a41b7aaf4680fff0e32cecaf0695e4a2cdc Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Fri, 25 Aug 2023 11:54:46 -0400 Subject: [PATCH 05/20] synthetic-data: updates typing to use ChatOpenAI --- libs/langchain/langchain/chains/data_generation/openai.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libs/langchain/langchain/chains/data_generation/openai.py b/libs/langchain/langchain/chains/data_generation/openai.py index a132bdab67d27..0f23959b727c0 100644 --- a/libs/langchain/langchain/chains/data_generation/openai.py +++ b/libs/langchain/langchain/chains/data_generation/openai.py @@ -5,6 +5,7 @@ from langchain import BasePromptTemplate, PromptTemplate from langchain.chains.openai_functions import create_structured_output_chain from langchain.chains.data_generation.base import SyntheticDataGenerator +from langchain.chat_models import ChatOpenAI from langchain.schema import BaseLLMOutputParser from langchain.schema.language_model import BaseLanguageModel @@ -15,7 +16,7 @@ def create_openai_data_generator( output_schema: Union[Dict[str, Any], Type[BaseModel]], - llm: BaseLanguageModel, + llm: ChatOpenAI, prompt: BasePromptTemplate, output_parser: Optional[BaseLLMOutputParser] = None, **kwargs: Any @@ -30,7 +31,7 @@ def create_openai_data_generator( Args: output_schema (Union[Dict[str, Any], Type[BaseModel]]): Schema for expected output. This can be either a dictionary representing a valid JsonSchema or a Pydantic BaseModel class. - llm (BaseLanguageModel): Language model to use. Should support the OpenAI function-calling API. + llm (ChatOpenAI): OpenAI language model to use. prompt (BasePromptTemplate): Template to be used for generating prompts. output_parser (Optional[BaseLLMOutputParser], optional): Parser for processing model outputs. If none is provided, a default will be inferred from the function types. 
From bb9fc2105aecd1db82d08f524887fcd3baa75b50 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Fri, 8 Sep 2023 17:45:48 -0700 Subject: [PATCH 06/20] synthetic-data: linting and typing additions --- .../langchain/chains/data_generation/base.py | 90 +++++++++++-------- .../chains/data_generation/openai.py | 61 +++++++------ .../chains/data_generation/prompts.py | 10 ++- .../chains/data_generation/test_openai.py | 77 +++++++++------- 4 files changed, 144 insertions(+), 94 deletions(-) diff --git a/libs/langchain/langchain/chains/data_generation/base.py b/libs/langchain/langchain/chains/data_generation/base.py index d16f578a1849b..1e8a5fe9f875b 100644 --- a/libs/langchain/langchain/chains/data_generation/base.py +++ b/libs/langchain/langchain/chains/data_generation/base.py @@ -1,8 +1,7 @@ import asyncio -from typing import List, Optional +from typing import Any, Dict, List, Optional, Union from pydantic.class_validators import root_validator -from pydantic.error_wrappers import ValidationError from pydantic.main import BaseModel from langchain.chains.base import Chain @@ -12,8 +11,7 @@ class SyntheticDataGenerator(BaseModel): - """ - Generates synthetic data using the given LLM and few-shot template. + """Generates synthetic data using the given LLM and few-shot template. Utilizes the provided LLM to produce synthetic data based on the few-shot prompt template. @@ -21,7 +19,7 @@ class SyntheticDataGenerator(BaseModel): Attributes: template (FewShotPromptTemplate): Template for few-shot prompting. llm (Optional[BaseLanguageModel]): Large Language Model to use for generation. - llm_chain (Optional[Chain]): LLM chain initialized with the LLM and few-shot template. + llm_chain (Optional[Chain]): LLM chain with the LLM and few-shot template. example_input_key (str): Key to use for storing example inputs. 
Usage Example: @@ -41,37 +39,43 @@ class Config: validate_assignment = True @root_validator(pre=False, skip_on_failure=True) - def set_llm_chain(cls, values): + def set_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]: llm_chain = values.get("llm_chain") llm = values.get("llm") few_shot_template = values.get("template") if not llm_chain: # If llm_chain is None or not present if llm is None or few_shot_template is None: - raise ValidationError("Both llm and few_shot_template must be provided if llm_chain is not given.") + raise ValueError( + "Both llm and few_shot_template must be provided if llm_chain is " + "not given." + ) values["llm_chain"] = LLMChain(llm=llm, prompt=few_shot_template) return values @staticmethod - def _format_dict_to_string(input_dict: dict) -> str: - formatted_str = ', '.join([f"{key}: {value}" for key, value in input_dict.items()]) + def _format_dict_to_string(input_dict: Dict) -> str: + formatted_str = ", ".join( + [f"{key}: {value}" for key, value in input_dict.items()] + ) return formatted_str - def _update_examples(self, example): - """Prevents duplicates by adding previously generated examples to the few shot list""" - if isinstance(example, BaseModel): - formatted_example = self._format_dict_to_string(example.dict()) - elif isinstance(example, dict): - formatted_example = self._format_dict_to_string(example) - else: - formatted_example = str(example) - self.template.examples.pop(0) - self.template.examples.append({self.example_input_key: formatted_example}) - - def generate(self, subject: str, runs: int, **kwargs) -> List[str]: - """ - Generate synthetic data using the given subject string. 
+ def _update_examples(self, example: Union[BaseModel, Dict[str, Any], str]) -> None: + """Prevents duplicates by adding previously generated examples to the few shot + list.""" + if self.template and self.template.examples: + if isinstance(example, BaseModel): + formatted_example = self._format_dict_to_string(example.dict()) + elif isinstance(example, dict): + formatted_example = self._format_dict_to_string(example) + else: + formatted_example = str(example) + self.template.examples.pop(0) + self.template.examples.append({self.example_input_key: formatted_example}) + + def generate(self, subject: str, runs: int, *args: Any, **kwargs: Any) -> List[str]: + """Generate synthetic data using the given subject string. Args: subject (str): The subject the synthetic data will be about. @@ -82,19 +86,27 @@ def generate(self, subject: str, runs: int, **kwargs) -> List[str]: List[str]: List of generated synthetic data. Usage Example: - >>> results = generator.generate(subject="climate change", runs=5, extra="Focus on environmental impacts.") + >>> results = generator.generate(subject="climate change", runs=5, + extra="Focus on environmental impacts.") """ + if self.llm_chain is None: + raise ValueError( + "llm_chain is none, either set either llm_chain or llm at generator " + "construction" + ) for _ in range(runs): - result = self.llm_chain.run(subject=subject, **kwargs) + result = self.llm_chain.run(subject=subject, *args, **kwargs) self.results.append(result) self._update_examples(result) return self.results - async def agenerate(self, subject: str, runs: int, extra: str = "", **kwargs) -> List[str]: - """ - Generate synthetic data using the given subject asynchronously. + async def agenerate( + self, subject: str, runs: int, extra: str = "", *args: Any, **kwargs: Any + ) -> List[str]: + """Generate synthetic data using the given subject asynchronously. 
- Note: Since the LLM calls run concurrently, you may have fewer duplicates by adding specific instructions to + Note: Since the LLM calls run concurrently, + you may have fewer duplicates by adding specific instructions to the "extra" keyword argument. Args: @@ -106,12 +118,20 @@ async def agenerate(self, subject: str, runs: int, extra: str = "", **kwargs) -> List[str]: List of generated synthetic data for the given subject. Usage Example: - >>> results = await generator.agenerate(subject="climate change", runs=5, extra="Focus on env impacts.") + >>> results = await generator.agenerate(subject="climate change", runs=5, + extra="Focus on env impacts.") """ - async def run_chain(subject: str, extra: str = "", **kwargs): - result = await self.llm_chain.arun(subject=subject, extra=extra, **kwargs) - self.results.append(result) - - await asyncio.gather(*(run_chain(subject=subject, extra=extra, **kwargs) for _ in range(runs))) + async def run_chain( + subject: str, extra: str = "", *args: Any, **kwargs: Any + ) -> None: + if self.llm_chain is not None: + result = await self.llm_chain.arun( + subject=subject, extra=extra, *args, **kwargs + ) + self.results.append(result) + + await asyncio.gather( + *(run_chain(subject=subject, extra=extra) for _ in range(runs)) + ) return self.results diff --git a/libs/langchain/langchain/chains/data_generation/openai.py b/libs/langchain/langchain/chains/data_generation/openai.py index 0f23959b727c0..b04a460bef2f6 100644 --- a/libs/langchain/langchain/chains/data_generation/openai.py +++ b/libs/langchain/langchain/chains/data_generation/openai.py @@ -1,52 +1,63 @@ -from typing import Optional, Any, Dict, Type, Union +from typing import Any, Dict, Optional, Type, Union from pydantic.main import BaseModel from langchain import BasePromptTemplate, PromptTemplate -from langchain.chains.openai_functions import create_structured_output_chain from langchain.chains.data_generation.base import SyntheticDataGenerator +from 
langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI from langchain.schema import BaseLLMOutputParser -from langchain.schema.language_model import BaseLanguageModel -OPENAI_TEMPLATE = PromptTemplate( - input_variables=["example"], template="{example}" -) +OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") def create_openai_data_generator( - output_schema: Union[Dict[str, Any], Type[BaseModel]], - llm: ChatOpenAI, - prompt: BasePromptTemplate, - output_parser: Optional[BaseLLMOutputParser] = None, - **kwargs: Any + output_schema: Union[Dict[str, Any], Type[BaseModel]], + llm: ChatOpenAI, + prompt: BasePromptTemplate, + output_parser: Optional[BaseLLMOutputParser] = None, + **kwargs: Any ) -> SyntheticDataGenerator: """ Create an instance of SyntheticDataGenerator tailored for OpenAI models. - This function creates an LLM chain designed for structured output based on the provided schema, - language model, and prompt template. The resulting chain is then used to instantiate and return - a SyntheticDataGenerator. + This function creates an LLM chain designed for structured output based on the + provided schema, language model, and prompt template. The resulting chain is then + used to instantiate and return a SyntheticDataGenerator. Args: - output_schema (Union[Dict[str, Any], Type[BaseModel]]): Schema for expected output. This can be either - a dictionary representing a valid JsonSchema or a Pydantic BaseModel class. + output_schema (Union[Dict[str, Any], Type[BaseModel]]): Schema for expected + output. This can be either a dictionary representing a valid JsonSchema or a + Pydantic BaseModel class. + + llm (ChatOpenAI): OpenAI language model to use. + prompt (BasePromptTemplate): Template to be used for generating prompts. - output_parser (Optional[BaseLLMOutputParser], optional): Parser for processing model outputs. 
If none - is provided, a default will be inferred from the function types. - **kwargs: Additional keyword arguments to be passed to `create_structured_output_chain`. - Returns: - SyntheticDataGenerator: An instance of the data generator set up with the constructed chain. + + output_parser (Optional[BaseLLMOutputParser], optional): Parser for + processing model outputs. If none is provided, a default will be inferred + from the function types. + + + **kwargs: Additional keyword arguments to be passed to + `create_structured_output_chain`. + + + Returns: SyntheticDataGenerator: An instance of the data generator set up with + the constructed chain. Usage: - To generate synthetic data with a structured output, first define your desired output schema. Then, - use this function to create a SyntheticDataGenerator instance. After obtaining the generator, you - can utilize its methods to produce the desired synthetic data. + To generate synthetic data with a structured output, first define your desired + output schema. Then, use this function to create a SyntheticDataGenerator + instance. After obtaining the generator, you can utilize its methods to produce + the desired synthetic data. 
""" # Create function calling chain to ensure structured output - chain = create_structured_output_chain(output_schema, llm, prompt, output_parser=output_parser, **kwargs) + chain = create_structured_output_chain( + output_schema, llm, prompt, output_parser=output_parser, **kwargs + ) # Create the SyntheticDataGenerator instance with the created chain generator = SyntheticDataGenerator(template=prompt, llm_chain=chain) diff --git a/libs/langchain/langchain/chains/data_generation/prompts.py b/libs/langchain/langchain/chains/data_generation/prompts.py index 791e346f201c4..4cd81213ddd34 100644 --- a/libs/langchain/langchain/chains/data_generation/prompts.py +++ b/libs/langchain/langchain/chains/data_generation/prompts.py @@ -5,7 +5,9 @@ input_variables=[DEFAULT_INPUT_KEY], template="{example}" ) -SYNTHETIC_FEW_SHOT_PREFIX = "This is a test about generating synthetic data about {subject}. Examples below:" -SYNTHETIC_FEW_SHOT_SUFFIX = """Now you generate synthetic data about {subject}. Make sure to {extra}:""" - - +SYNTHETIC_FEW_SHOT_PREFIX = ( + "This is a test about generating synthetic data about {subject}. Examples below:" +) +SYNTHETIC_FEW_SHOT_SUFFIX = ( + """Now you generate synthetic data about {subject}. 
Make sure to {extra}:""" +) diff --git a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py index 225e91ab2b8fa..4522e7e7f0ba4 100644 --- a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py +++ b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py @@ -1,11 +1,17 @@ import pytest +from pydantic import BaseModel from langchain import FewShotPromptTemplate -from langchain.chat_models import ChatOpenAI from langchain.chains.data_generation.base import SyntheticDataGenerator -from langchain.chains.data_generation.openai import create_openai_data_generator, OPENAI_TEMPLATE -from langchain.chains.data_generation.prompts import SYNTHETIC_FEW_SHOT_PREFIX, SYNTHETIC_FEW_SHOT_SUFFIX -from pydantic import BaseModel +from langchain.chains.data_generation.openai import ( + OPENAI_TEMPLATE, + create_openai_data_generator, +) +from langchain.chains.data_generation.prompts import ( + SYNTHETIC_FEW_SHOT_PREFIX, + SYNTHETIC_FEW_SHOT_SUFFIX, +) +from langchain.chat_models import ChatOpenAI # Define the desired output schema for individual medical billing record @@ -20,29 +26,35 @@ class MedicalBilling(BaseModel): examples = [ { - "example": """Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: J20.9, Procedure Code: 99203, - Total Charge: $500, Insurance Claim Amount: $350""" + "example": """Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: + J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount: + $350""" }, { - "example": """Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis Code: M54.5, Procedure Code: 99213, - Total Charge: $150, Insurance Claim Amount: $120""" + "example": """Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis + Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim + Amount: $120""" }, { - "example": """Patient ID: 345678, Patient Name: 
Emily Stone, Diagnosis Code: E11.9, Procedure Code: 99214, - Total Charge: $300, Insurance Claim Amount: $250""" + "example": """Patient ID: 345678, Patient Name: Emily Stone, Diagnosis Code: + E11.9, Procedure Code: 99214, Total Charge: $300, Insurance Claim Amount: + $250""" }, { - "example": """Patient ID: 901234, Patient Name: Robert Miles, Diagnosis Code: B07.9, Procedure Code: 99204, - Total Charge: $200, Insurance Claim Amount: $160""" + "example": """Patient ID: 901234, Patient Name: Robert Miles, Diagnosis Code: + B07.9, Procedure Code: 99204, Total Charge: $200, Insurance Claim Amount: + $160""" }, { - "example": """Patient ID: 567890, Patient Name: Clara Jensen, Diagnosis Code: F41.9, Procedure Code: 99205, - Total Charge: $450, Insurance Claim Amount: $310""" + "example": """Patient ID: 567890, Patient Name: Clara Jensen, Diagnosis Code: + F41.9, Procedure Code: 99205, Total Charge: $450, Insurance Claim Amount: + $310""" }, { - "example": """Patient ID: 234567, Patient Name: Alan Turing, Diagnosis Code: G40.909, Procedure Code: 99215, - Total Charge: $220, Insurance Claim Amount: $180""" - } + "example": """Patient ID: 234567, Patient Name: Alan Turing, Diagnosis Code: + G40.909, Procedure Code: 99215, Total Charge: $220, Insurance Claim Amount: + $180""" + }, ] prompt_template = FewShotPromptTemplate( @@ -55,21 +67,22 @@ class MedicalBilling(BaseModel): @pytest.fixture(scope="function") -def synthetic_data_generator(): +def synthetic_data_generator() -> SyntheticDataGenerator: return create_openai_data_generator( output_schema=MedicalBilling, llm=ChatOpenAI(temperature=1), # replace with your LLM instance - prompt=prompt_template + prompt=prompt_template, ) @pytest.mark.requires("openai") -def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator): - synthetic_results = synthetic_data_generator.generate(subject="medical_billing", - extra="""the name must be chosen at random. Make it - something you wouldn't normally choose. 
The CPT - codes must make sense with the ICD-10 code""", - runs=10) +def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator) -> None: + synthetic_results = synthetic_data_generator.generate( + subject="medical_billing", + extra="""the name must be chosen at random. Make it something you wouldn't + normally choose.""", + runs=10, + ) assert len(synthetic_results) == 10 for row in synthetic_results: assert isinstance(row, MedicalBilling) @@ -78,11 +91,15 @@ def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator): @pytest.mark.requires("openai") @pytest.mark.asyncio -async def test_agenerate_synthetic(synthetic_data_generator: SyntheticDataGenerator): - synthetic_results = await synthetic_data_generator.agenerate(subject="medical_billing", - extra="""the name must be chosen at random. Make it - something you wouldn't normally choose.""", - runs=10) +async def test_agenerate_synthetic( + synthetic_data_generator: SyntheticDataGenerator, +) -> None: + synthetic_results = await synthetic_data_generator.agenerate( + subject="medical_billing", + extra="""the name must be chosen at random. 
Make it something you wouldn't + normally choose.""", + runs=10, + ) assert len(synthetic_results) == 10 for row in synthetic_results: assert isinstance(row, MedicalBilling) From f263fc18cc56bde6821661c8fcc5afab547e5f55 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Mon, 11 Sep 2023 15:48:49 -0700 Subject: [PATCH 07/20] synthetic-data: fix --- .../integration_tests/chains/data_generation/test_openai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py index 4522e7e7f0ba4..e767b6178be47 100644 --- a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py +++ b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py @@ -1,5 +1,5 @@ import pytest -from pydantic import BaseModel +from langchain.pydantic_v1 import BaseModel from langchain import FewShotPromptTemplate from langchain.chains.data_generation.base import SyntheticDataGenerator From 8a10cfa56a89192dff4835c1c0c430b9bb6a96d9 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Fri, 15 Sep 2023 18:55:10 -0400 Subject: [PATCH 08/20] synthetic-data: linting fixes --- libs/langchain/langchain/chains/data_generation/base.py | 2 +- libs/langchain/langchain/chains/data_generation/openai.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/langchain/langchain/chains/data_generation/base.py b/libs/langchain/langchain/chains/data_generation/base.py index 1e8a5fe9f875b..c116f6c6ebbb0 100644 --- a/libs/langchain/langchain/chains/data_generation/base.py +++ b/libs/langchain/langchain/chains/data_generation/base.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional, Union from pydantic.class_validators import root_validator -from pydantic.main import BaseModel +from langchain.pydantic_v1 import BaseModel from langchain.chains.base import Chain from langchain.chains.llm import LLMChain 
diff --git a/libs/langchain/langchain/chains/data_generation/openai.py b/libs/langchain/langchain/chains/data_generation/openai.py index b04a460bef2f6..a8ff8cd0308a8 100644 --- a/libs/langchain/langchain/chains/data_generation/openai.py +++ b/libs/langchain/langchain/chains/data_generation/openai.py @@ -1,6 +1,6 @@ from typing import Any, Dict, Optional, Type, Union -from pydantic.main import BaseModel +from langchain.pydantic_v1 import BaseModel from langchain import BasePromptTemplate, PromptTemplate from langchain.chains.data_generation.base import SyntheticDataGenerator From ee0587db5bae44ed940b715ec87b272dbb8646d2 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Tue, 26 Sep 2023 17:39:02 -0700 Subject: [PATCH 09/20] synthetic-data: fixes linting issues --- libs/langchain/langchain/chains/data_generation/base.py | 4 +--- libs/langchain/langchain/chains/data_generation/openai.py | 3 +-- .../integration_tests/chains/data_generation/test_openai.py | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/libs/langchain/langchain/chains/data_generation/base.py b/libs/langchain/langchain/chains/data_generation/base.py index c116f6c6ebbb0..45932945c5132 100644 --- a/libs/langchain/langchain/chains/data_generation/base.py +++ b/libs/langchain/langchain/chains/data_generation/base.py @@ -1,12 +1,10 @@ import asyncio from typing import Any, Dict, List, Optional, Union -from pydantic.class_validators import root_validator -from langchain.pydantic_v1 import BaseModel - from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.prompts.few_shot import FewShotPromptTemplate +from langchain.pydantic_v1 import BaseModel, root_validator from langchain.schema.language_model import BaseLanguageModel diff --git a/libs/langchain/langchain/chains/data_generation/openai.py b/libs/langchain/langchain/chains/data_generation/openai.py index a8ff8cd0308a8..3f910bb6da3f8 100644 --- a/libs/langchain/langchain/chains/data_generation/openai.py 
+++ b/libs/langchain/langchain/chains/data_generation/openai.py @@ -1,11 +1,10 @@ from typing import Any, Dict, Optional, Type, Union -from langchain.pydantic_v1 import BaseModel - from langchain import BasePromptTemplate, PromptTemplate from langchain.chains.data_generation.base import SyntheticDataGenerator from langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI +from langchain.pydantic_v1 import BaseModel from langchain.schema import BaseLLMOutputParser OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") diff --git a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py index e767b6178be47..2257e2c07b93f 100644 --- a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py +++ b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py @@ -1,5 +1,4 @@ import pytest -from langchain.pydantic_v1 import BaseModel from langchain import FewShotPromptTemplate from langchain.chains.data_generation.base import SyntheticDataGenerator @@ -12,6 +11,7 @@ SYNTHETIC_FEW_SHOT_SUFFIX, ) from langchain.chat_models import ChatOpenAI +from langchain.pydantic_v1 import BaseModel # Define the desired output schema for individual medical billing record From b2ffc7b95dc0fd852040313489769c2ec46b26fa Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Tue, 26 Sep 2023 18:46:57 -0700 Subject: [PATCH 10/20] synthetic-data: fix linting --- libs/langchain/langchain/chains/data_generation/openai.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/langchain/langchain/chains/data_generation/openai.py b/libs/langchain/langchain/chains/data_generation/openai.py index 3f910bb6da3f8..a80f34335b06d 100644 --- a/libs/langchain/langchain/chains/data_generation/openai.py +++ b/libs/langchain/langchain/chains/data_generation/openai.py @@ -1,11 
+1,11 @@ from typing import Any, Dict, Optional, Type, Union -from langchain import BasePromptTemplate, PromptTemplate from langchain.chains.data_generation.base import SyntheticDataGenerator from langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI +from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import BaseModel -from langchain.schema import BaseLLMOutputParser +from langchain.schema import BaseLLMOutputParser, BasePromptTemplate OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") From 4643aac017ece4b345367b8b67a06a3a0e3ab528 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Tue, 26 Sep 2023 19:08:33 -0700 Subject: [PATCH 11/20] synthetic-data: removes stray print statements --- .../integration_tests/chains/data_generation/test_openai.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py index 2257e2c07b93f..4fa9d883c7e1c 100644 --- a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py +++ b/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py @@ -86,7 +86,6 @@ def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator) -> assert len(synthetic_results) == 10 for row in synthetic_results: assert isinstance(row, MedicalBilling) - print(synthetic_results) @pytest.mark.requires("openai") @@ -103,4 +102,3 @@ async def test_agenerate_synthetic( assert len(synthetic_results) == 10 for row in synthetic_results: assert isinstance(row, MedicalBilling) - print(synthetic_results) From 90663ff6d4d6574ab60adb6cfed17f1b6917a531 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Wed, 27 Sep 2023 15:50:28 -0700 Subject: [PATCH 12/20] synthetic-data: moves things to experimental --- .../tabular_synthetic_data}/__init__.py | 0 
.../tabular_synthetic_data}/base.py | 0 .../tabular_synthetic_data}/openai.py | 0 .../tabular_synthetic_data}/prompts.py | 0 .../chains/test_synthetic_data_openai.py} | 16 +++++++--------- .../chains/data_generation/__init__.py | 0 6 files changed, 7 insertions(+), 9 deletions(-) rename libs/{langchain/langchain/chains/data_generation => experimental/langchain_experimental/tabular_synthetic_data}/__init__.py (100%) rename libs/{langchain/langchain/chains/data_generation => experimental/langchain_experimental/tabular_synthetic_data}/base.py (100%) rename libs/{langchain/langchain/chains/data_generation => experimental/langchain_experimental/tabular_synthetic_data}/openai.py (100%) rename libs/{langchain/langchain/chains/data_generation => experimental/langchain_experimental/tabular_synthetic_data}/prompts.py (100%) rename libs/{langchain/tests/integration_tests/chains/data_generation/test_openai.py => experimental/tests/integration_tests/chains/test_synthetic_data_openai.py} (88%) delete mode 100644 libs/langchain/tests/integration_tests/chains/data_generation/__init__.py diff --git a/libs/langchain/langchain/chains/data_generation/__init__.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/__init__.py similarity index 100% rename from libs/langchain/langchain/chains/data_generation/__init__.py rename to libs/experimental/langchain_experimental/tabular_synthetic_data/__init__.py diff --git a/libs/langchain/langchain/chains/data_generation/base.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/base.py similarity index 100% rename from libs/langchain/langchain/chains/data_generation/base.py rename to libs/experimental/langchain_experimental/tabular_synthetic_data/base.py diff --git a/libs/langchain/langchain/chains/data_generation/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py similarity index 100% rename from libs/langchain/langchain/chains/data_generation/openai.py rename to 
libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py diff --git a/libs/langchain/langchain/chains/data_generation/prompts.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py similarity index 100% rename from libs/langchain/langchain/chains/data_generation/prompts.py rename to libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py diff --git a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py similarity index 88% rename from libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py rename to libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py index 4fa9d883c7e1c..56d84fa3424e6 100644 --- a/libs/langchain/tests/integration_tests/chains/data_generation/test_openai.py +++ b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py @@ -1,17 +1,15 @@ import pytest from langchain import FewShotPromptTemplate -from langchain.chains.data_generation.base import SyntheticDataGenerator -from langchain.chains.data_generation.openai import ( - OPENAI_TEMPLATE, - create_openai_data_generator, -) -from langchain.chains.data_generation.prompts import ( - SYNTHETIC_FEW_SHOT_PREFIX, - SYNTHETIC_FEW_SHOT_SUFFIX, -) + from langchain.chat_models import ChatOpenAI from langchain.pydantic_v1 import BaseModel +from libs.experimental.langchain_experimental.tabular_synthetic_data.base import \ + SyntheticDataGenerator +from libs.experimental.langchain_experimental.tabular_synthetic_data.openai import \ + create_openai_data_generator, OPENAI_TEMPLATE +from libs.experimental.langchain_experimental.tabular_synthetic_data.prompts import \ + SYNTHETIC_FEW_SHOT_SUFFIX, SYNTHETIC_FEW_SHOT_PREFIX # Define the desired output schema for individual medical billing record diff --git a/libs/langchain/tests/integration_tests/chains/data_generation/__init__.py 
b/libs/langchain/tests/integration_tests/chains/data_generation/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 From 7153006e84c3a0a93c28109d0c187006e7ee0dc7 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Wed, 27 Sep 2023 16:41:38 -0700 Subject: [PATCH 13/20] synthetic-data: adds usage notebook --- .../use_cases/more/data_generation.ipynb | 216 ++++++++++++++++-- .../tabular_synthetic_data/openai.py | 3 +- .../tabular_synthetic_data/prompts.py | 2 +- .../chains/test_synthetic_data_openai.py | 6 +- 4 files changed, 206 insertions(+), 21 deletions(-) diff --git a/docs/extras/use_cases/more/data_generation.ipynb b/docs/extras/use_cases/more/data_generation.ipynb index dd7b617f5b323..ffb98849e0bd8 100644 --- a/docs/extras/use_cases/more/data_generation.ipynb +++ b/docs/extras/use_cases/more/data_generation.ipynb @@ -1,46 +1,230 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "id": "aa3571cc", "metadata": {}, "source": [ - "# Data generation\n", + "# Synthetic Data generation\n", "\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/data_generation.ipynb)\n", "\n", "## Use case\n", "\n", - "Creating synthethic language data can be beneficial for multiple reasons:\n", - "- providing data augmentation\n", - "- obtaining domain-specific examples\n", - "- increasing data diversity\n", - "- enabling quick iteration and experimentation\n", + "Synthetic data is artificially generated data, rather than data collected from real-world events. It's used to simulate real data without compromising privacy or encountering real-world limitations. \n", + "\n", + "Benefits of Synthetic Data:\n", + "\n", + "1. **Privacy and Security**: No real personal data at risk of breaches.\n", + "2. **Data Augmentation**: Expands datasets for machine learning.\n", + "3. **Flexibility**: Create specific or rare scenarios.\n", + "4. 
**Cost-effective**: Often cheaper than real-world data collection.\n", + "5. **Regulatory Compliance**: Helps navigate strict data protection laws.\n", + "6. **Reduction of Bias**: Potential to generate unbiased datasets.\n", + "7. **Model Robustness**: Can lead to better generalizing AI models.\n", + "8. **Rapid Prototyping**: Enables quick testing without real data.\n", + "9. **Controlled Experimentation**: Simulate specific conditions.\n", + "10. **Access to Data**: Alternative when real data isn't available.\n", + "\n", + "Note: Despite the benefits, synthetic data should be used carefully, as it may not always capture real-world complexities.\n", "\n", "## Quickstart\n", "\n", - "Let's see a very straightforward example of how we can use OpenAI functions for creating synthetic data in LangChain." + "In this notebook, we'll dive deep into generating synthetic medical billing records using the langchain library. This tool is particularly useful when you want to develop or test algorithms but don't want to use real patient data due to privacy concerns or data availability issues." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "bca57012", + "metadata": {}, + "source": [ + "### Setup\n", + "First, you'll need to have the langchain library installed, along with its dependencies. Since we're using the OpenAI generator chain, we'll install that as well. Since this is an experimental lib, we'll need to include `langchain_experimental` in our installs. We'll then import the necessary modules." 
] }, { "cell_type": "code", "execution_count": null, - "id": "7ae36b66", - "metadata": { - "scrolled": true - }, + "id": "a0377478", + "metadata": {}, "outputs": [], "source": [ - "!pip install langchain openai \n", - "\n", + "# !pip install -U langchain langchain_experimental openai\n", "# Set env var OPENAI_API_KEY or load from a .env file:\n", "# import dotenv\n", - "# dotenv.load_dotenv()" + "# dotenv.load_dotenv()\n", + "\n", + "from langchain import FewShotPromptTemplate\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.pydantic_v1 import BaseModel\n", + "from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator\n", + "from langchain_experimental.tabular_synthetic_data.openai import create_openai_data_generator, OPENAI_TEMPLATE\n", + "from langchain_experimental.tabular_synthetic_data.prompts import SYNTHETIC_FEW_SHOT_SUFFIX, SYNTHETIC_FEW_SHOT_PREFIX\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "a5a0917b", + "metadata": {}, + "source": [ + "## 1. Define Your Data Model\n", + "Every dataset has a structure or a \"schema\". The MedicalBilling class below serves as our schema for the synthetic data. By defining this, we're informing our synthetic data generator about the shape and nature of data we expect." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "291bad6e", + "metadata": {}, + "outputs": [], + "source": [ + "class MedicalBilling(BaseModel):\n", + " patient_id: int\n", + " patient_name: str\n", + " diagnosis_code: str\n", + " procedure_code: str\n", + " total_charge: float\n", + " insurance_claim_amount: float\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "2059ca63", + "metadata": {}, + "source": [ + "For instance, every record will have a `patient_id` that's an integer, a `patient_name` that's a string, and so on.\n", + "\n", + "## 2. 
Sample Data\n", + "To guide the synthetic data generator, it's useful to provide it with a few real-world-like examples. These examples serve as a \"seed\" - they're representative of the kind of data you want, and the generator will use them to create more data that looks similar.\n", + "\n", + "Here are some fictional medical billing records:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b989b792", + "metadata": {}, + "outputs": [], + "source": [ + "examples = [\n", + " {\"example\": \"\"\"Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: \n", + " J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount: $350\"\"\"},\n", + " {\"example\": \"\"\"Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis \n", + " Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim Amount: $120\"\"\"},\n", + " {\"example\": \"\"\"Patient ID: 345678, Patient Name: Emily Stone, Diagnosis Code: \n", + " E11.9, Procedure Code: 99214, Total Charge: $300, Insurance Claim Amount: $250\"\"\"},\n", + "]\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "57e28809", + "metadata": {}, + "source": [ + "## 3. Craft a Prompt Template\n", + "The generator doesn't magically know how to create our data; we need to guide it. We do this by creating a prompt template. This template helps instruct the underlying language model on how to produce synthetic data in the desired format." 
] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, + "id": "ea6e042e", + "metadata": {}, + "outputs": [], + "source": [ + "OPENAI_TEMPLATE = PromptTemplate(input_variables=[\"example\"], template=\"{example}\")\n", + "\n", + "prompt_template = FewShotPromptTemplate(\n", + " prefix=SYNTHETIC_FEW_SHOT_PREFIX,\n", + " examples=examples,\n", + " suffix=SYNTHETIC_FEW_SHOT_SUFFIX,\n", + " input_variables=[\"subject\", \"extra\"],\n", + " example_prompt=OPENAI_TEMPLATE,\n", + ")\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "fa6da3cb", + "metadata": {}, + "source": [ + "The `FewShotPromptTemplate` includes:\n", + "\n", + "- `prefix` and `suffix`: These likely contain guiding context or instructions.\n", + "- `examples`: The sample data we defined earlier.\n", + "- `input_variables`: These variables (\"subject\", \"extra\") are placeholders you can dynamically fill later. For instance, \"subject\" might be filled with \"medical_billing\" to guide the model further.\n", + "- `example_prompt`: This prompt template is the format we want each example row to take in our prompt.\n", + "\n", + "## 4. Creating the Data Generator\n", + "With the schema and the prompt ready, the next step is to create the data generator. This object knows how to communicate with the underlying language model to get synthetic data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1b9ba911", + "metadata": {}, + "outputs": [], + "source": [ + "synthetic_data_generator = create_openai_data_generator(\n", + " output_schema=MedicalBilling,\n", + " llm=ChatOpenAI(temperature=1), # You'll need to replace with your actual Language Model instance\n", + " prompt=prompt_template,\n", + ")\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "a4198bd6", + "metadata": {}, + "source": [ + "## 5. Generate Synthetic Data\n", + "Finally, let's get our synthetic data!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a424c890", + "metadata": {}, + "outputs": [], + "source": [ + "synthetic_results = synthetic_data_generator.generate(\n", + " subject=\"medical_billing\",\n", + " extra=\"the name must be chosen at random. Make it something you wouldn't normally choose.\",\n", + " runs=10,\n", + ")\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "fa4402e9", + "metadata": {}, + "source": [ + "This command asks the generator to produce 10 synthetic medical billing records. The results are stored in `synthetic_results`. The output will be a list of the MedicalBilling pydantic models." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "53a4cbf9", + "metadata": {}, + "source": [ + "### Other implementations\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, "id": "9e715d94", "metadata": { "scrolled": true @@ -429,7 +613,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.11.3" } }, "nbformat": 4, diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py index a80f34335b06d..5e9d96baba1d5 100644 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py +++ b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py @@ -1,11 +1,12 @@ from typing import Any, Dict, Optional, Type, Union -from langchain.chains.data_generation.base import SyntheticDataGenerator from langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import BaseModel from langchain.schema import BaseLLMOutputParser, BasePromptTemplate +from langchain_experimental.tabular_synthetic_data.base import \ + SyntheticDataGenerator OPENAI_TEMPLATE = 
PromptTemplate(input_variables=["example"], template="{example}") diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py index 4cd81213ddd34..1f32ef4ef0193 100644 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py +++ b/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py @@ -1,4 +1,4 @@ -from libs.langchain.langchain.prompts.prompt import PromptTemplate +from langchain.prompts.prompt import PromptTemplate DEFAULT_INPUT_KEY = "example" DEFAULT_PROMPT = PromptTemplate( diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py index 56d84fa3424e6..28f0b7464a530 100644 --- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py +++ b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py @@ -4,11 +4,11 @@ from langchain.chat_models import ChatOpenAI from langchain.pydantic_v1 import BaseModel -from libs.experimental.langchain_experimental.tabular_synthetic_data.base import \ +from langchain_experimental.tabular_synthetic_data.base import \ SyntheticDataGenerator -from libs.experimental.langchain_experimental.tabular_synthetic_data.openai import \ +from langchain_experimental.tabular_synthetic_data.openai import \ create_openai_data_generator, OPENAI_TEMPLATE -from libs.experimental.langchain_experimental.tabular_synthetic_data.prompts import \ +from langchain_experimental.tabular_synthetic_data.prompts import \ SYNTHETIC_FEW_SHOT_SUFFIX, SYNTHETIC_FEW_SHOT_PREFIX From b9254fb0464397e2c628cb3aeb85892e930116ec Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Thu, 28 Sep 2023 16:09:15 -0700 Subject: [PATCH 14/20] synthetic-data: fixes issue with import from main file --- docs/extras/use_cases/more/data_generation.ipynb | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/docs/extras/use_cases/more/data_generation.ipynb b/docs/extras/use_cases/more/data_generation.ipynb index ffb98849e0bd8..c5d00e94f1379 100644 --- a/docs/extras/use_cases/more/data_generation.ipynb +++ b/docs/extras/use_cases/more/data_generation.ipynb @@ -56,7 +56,7 @@ "# import dotenv\n", "# dotenv.load_dotenv()\n", "\n", - "from langchain import FewShotPromptTemplate\n", + "from langchain.prompts import FewShotPromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.pydantic_v1 import BaseModel\n", "from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator\n", From dcbf9f9a3f91e64e2aab7000bc89b5eb1d8aa484 Mon Sep 17 00:00:00 2001 From: PaperMoose Date: Thu, 28 Sep 2023 16:24:08 -0700 Subject: [PATCH 15/20] synthetic-data: address Will's comments --- docs/extras/use_cases/more/data_generation.ipynb | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/docs/extras/use_cases/more/data_generation.ipynb b/docs/extras/use_cases/more/data_generation.ipynb index c5d00e94f1379..969f37cbe249f 100644 --- a/docs/extras/use_cases/more/data_generation.ipynb +++ b/docs/extras/use_cases/more/data_generation.ipynb @@ -21,11 +21,10 @@ "3. **Flexibility**: Create specific or rare scenarios.\n", "4. **Cost-effective**: Often cheaper than real-world data collection.\n", "5. **Regulatory Compliance**: Helps navigate strict data protection laws.\n", - "6. **Reduction of Bias**: Potential to generate unbiased datasets.\n", - "7. **Model Robustness**: Can lead to better generalizing AI models.\n", - "8. **Rapid Prototyping**: Enables quick testing without real data.\n", - "9. **Controlled Experimentation**: Simulate specific conditions.\n", - "10. **Access to Data**: Alternative when real data isn't available.\n", + "6. **Model Robustness**: Can lead to better generalizing AI models.\n", + "7. **Rapid Prototyping**: Enables quick testing without real data.\n", + "8. 
**Controlled Experimentation**: Simulate specific conditions.\n", + "9. **Access to Data**: Alternative when real data isn't available.\n", "\n", "Note: Despite the benefits, synthetic data should be used carefully, as it may not always capture real-world complexities.\n", "\n", @@ -51,12 +50,12 @@ "metadata": {}, "outputs": [], "source": [ - "# !pip install -U langchain langchain_experimental openai\n", + "!pip install -U langchain langchain_experimental openai\n", "# Set env var OPENAI_API_KEY or load from a .env file:\n", "# import dotenv\n", "# dotenv.load_dotenv()\n", "\n", - "from langchain.prompts import FewShotPromptTemplate\n", + "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.pydantic_v1 import BaseModel\n", "from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator\n", @@ -613,7 +612,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.3" + "version": "3.11.4" } }, "nbformat": 4, From dba01d9d10c614907efe4701c89487b7eafcd2cb Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Thu, 28 Sep 2023 16:27:57 -0700 Subject: [PATCH 16/20] format --- .../tabular_synthetic_data/openai.py | 3 +-- .../chains/test_synthetic_data_openai.py | 16 +++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py index 5e9d96baba1d5..352ce8a6539d0 100644 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py +++ b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py @@ -5,8 +5,7 @@ from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import BaseModel from langchain.schema import BaseLLMOutputParser, BasePromptTemplate -from 
langchain_experimental.tabular_synthetic_data.base import \ - SyntheticDataGenerator +from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py index 28f0b7464a530..bf1d4b0119a9a 100644 --- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py +++ b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py @@ -1,15 +1,17 @@ import pytest from langchain import FewShotPromptTemplate - from langchain.chat_models import ChatOpenAI from langchain.pydantic_v1 import BaseModel -from langchain_experimental.tabular_synthetic_data.base import \ - SyntheticDataGenerator -from langchain_experimental.tabular_synthetic_data.openai import \ - create_openai_data_generator, OPENAI_TEMPLATE -from langchain_experimental.tabular_synthetic_data.prompts import \ - SYNTHETIC_FEW_SHOT_SUFFIX, SYNTHETIC_FEW_SHOT_PREFIX +from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator +from langchain_experimental.tabular_synthetic_data.openai import ( + OPENAI_TEMPLATE, + create_openai_data_generator, +) +from langchain_experimental.tabular_synthetic_data.prompts import ( + SYNTHETIC_FEW_SHOT_PREFIX, + SYNTHETIC_FEW_SHOT_SUFFIX, +) # Define the desired output schema for individual medical billing record From 1b50c2b4a4bf5d09e996b3903911f26aa80f4782 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Thu, 28 Sep 2023 16:34:10 -0700 Subject: [PATCH 17/20] format --- .../langchain_experimental/synthetic_data/__init__.py | 2 +- .../langchain_experimental/tabular_synthetic_data/openai.py | 3 ++- .../integration_tests/chains/test_synthetic_data_openai.py | 2 +- 3 files changed, 4 insertions(+), 3 
deletions(-) diff --git a/libs/experimental/langchain_experimental/synthetic_data/__init__.py b/libs/experimental/langchain_experimental/synthetic_data/__init__.py index de07dfc0a7be3..d7396f2de94a6 100644 --- a/libs/experimental/langchain_experimental/synthetic_data/__init__.py +++ b/libs/experimental/langchain_experimental/synthetic_data/__init__.py @@ -2,9 +2,9 @@ from langchain.chains.base import Chain from langchain.chains.llm import LLMChain -from langchain.prompts import PromptTemplate from langchain.schema.language_model import BaseLanguageModel +from langchain.prompts import PromptTemplate from langchain_experimental.synthetic_data.prompts import SENTENCE_PROMPT diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py index 352ce8a6539d0..ec3818ee82e1e 100644 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py +++ b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py @@ -2,9 +2,10 @@ from langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI +from langchain.schema import BaseLLMOutputParser, BasePromptTemplate + from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import BaseModel -from langchain.schema import BaseLLMOutputParser, BasePromptTemplate from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py index bf1d4b0119a9a..d8d2fd3fcd0cd 100644 --- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py +++ b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py @@ -1,7 +1,7 @@ import pytest 
+from langchain.chat_models import ChatOpenAI from langchain import FewShotPromptTemplate -from langchain.chat_models import ChatOpenAI from langchain.pydantic_v1 import BaseModel from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator from langchain_experimental.tabular_synthetic_data.openai import ( From 2856237aa34fb1cfa9d06687cae6f4a1079716f4 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Thu, 28 Sep 2023 17:04:47 -0700 Subject: [PATCH 18/20] ? --- .../tabular_synthetic_data/openai.py | 3 ++- .../chains/test_synthetic_data_openai.py | 11 ++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py index ec3818ee82e1e..21e378c00e75e 100644 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py +++ b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py @@ -6,7 +6,8 @@ from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import BaseModel -from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator +from langchain_experimental.tabular_synthetic_data.base import \ + SyntheticDataGenerator OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py index d8d2fd3fcd0cd..b2d12b6a47506 100644 --- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py +++ b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py @@ -3,15 +3,12 @@ from langchain import FewShotPromptTemplate from langchain.pydantic_v1 import BaseModel -from langchain_experimental.tabular_synthetic_data.base import 
SyntheticDataGenerator +from langchain_experimental.tabular_synthetic_data.base import \ + SyntheticDataGenerator from langchain_experimental.tabular_synthetic_data.openai import ( - OPENAI_TEMPLATE, - create_openai_data_generator, -) + OPENAI_TEMPLATE, create_openai_data_generator) from langchain_experimental.tabular_synthetic_data.prompts import ( - SYNTHETIC_FEW_SHOT_PREFIX, - SYNTHETIC_FEW_SHOT_SUFFIX, -) + SYNTHETIC_FEW_SHOT_PREFIX, SYNTHETIC_FEW_SHOT_SUFFIX) # Define the desired output schema for individual medical billing record From 6fdf294313515fb2ed31bac0948f0ccd8bb9cc42 Mon Sep 17 00:00:00 2001 From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com> Date: Thu, 28 Sep 2023 17:29:06 -0700 Subject: [PATCH 19/20] ? --- .../synthetic_data/__init__.py | 3 +-- .../tabular_synthetic_data/openai.py | 6 ++---- .../chains/test_synthetic_data_openai.py | 15 +++++++++------ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/libs/experimental/langchain_experimental/synthetic_data/__init__.py b/libs/experimental/langchain_experimental/synthetic_data/__init__.py index d7396f2de94a6..9e41382f2ef89 100644 --- a/libs/experimental/langchain_experimental/synthetic_data/__init__.py +++ b/libs/experimental/langchain_experimental/synthetic_data/__init__.py @@ -2,9 +2,8 @@ from langchain.chains.base import Chain from langchain.chains.llm import LLMChain -from langchain.schema.language_model import BaseLanguageModel - from langchain.prompts import PromptTemplate +from langchain.schema.language_model import BaseLanguageModel from langchain_experimental.synthetic_data.prompts import SENTENCE_PROMPT diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py index 21e378c00e75e..352ce8a6539d0 100644 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py +++ 
b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py @@ -2,12 +2,10 @@ from langchain.chains.openai_functions import create_structured_output_chain from langchain.chat_models import ChatOpenAI -from langchain.schema import BaseLLMOutputParser, BasePromptTemplate - from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import BaseModel -from langchain_experimental.tabular_synthetic_data.base import \ - SyntheticDataGenerator +from langchain.schema import BaseLLMOutputParser, BasePromptTemplate +from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py index b2d12b6a47506..47c77c241d504 100644 --- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py +++ b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py @@ -1,14 +1,17 @@ import pytest -from langchain.chat_models import ChatOpenAI -from langchain import FewShotPromptTemplate +from langchain.chat_models import ChatOpenAI +from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.pydantic_v1 import BaseModel -from langchain_experimental.tabular_synthetic_data.base import \ - SyntheticDataGenerator +from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator from langchain_experimental.tabular_synthetic_data.openai import ( - OPENAI_TEMPLATE, create_openai_data_generator) + OPENAI_TEMPLATE, + create_openai_data_generator, +) from langchain_experimental.tabular_synthetic_data.prompts import ( - SYNTHETIC_FEW_SHOT_PREFIX, SYNTHETIC_FEW_SHOT_SUFFIX) + SYNTHETIC_FEW_SHOT_PREFIX, + SYNTHETIC_FEW_SHOT_SUFFIX, +) # Define the desired output schema for individual medical billing record From 
42152151b612674415252b23f8471465b035de2c Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 28 Sep 2023 17:55:23 -0700 Subject: [PATCH 20/20] fmt --- .../langchain_experimental/synthetic_data/__init__.py | 1 + .../langchain_experimental/tabular_synthetic_data/openai.py | 1 + .../integration_tests/chains/test_synthetic_data_openai.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/libs/experimental/langchain_experimental/synthetic_data/__init__.py b/libs/experimental/langchain_experimental/synthetic_data/__init__.py index 9e41382f2ef89..de07dfc0a7be3 100644 --- a/libs/experimental/langchain_experimental/synthetic_data/__init__.py +++ b/libs/experimental/langchain_experimental/synthetic_data/__init__.py @@ -4,6 +4,7 @@ from langchain.chains.llm import LLMChain from langchain.prompts import PromptTemplate from langchain.schema.language_model import BaseLanguageModel + from langchain_experimental.synthetic_data.prompts import SENTENCE_PROMPT diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py index 352ce8a6539d0..41d3575f90dd8 100644 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py +++ b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py @@ -5,6 +5,7 @@ from langchain.prompts import PromptTemplate from langchain.pydantic_v1 import BaseModel from langchain.schema import BaseLLMOutputParser, BasePromptTemplate + from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py index 47c77c241d504..7366901b989e6 100644 --- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py +++ 
b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py @@ -1,8 +1,8 @@ import pytest - from langchain.chat_models import ChatOpenAI from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.pydantic_v1 import BaseModel + from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator from langchain_experimental.tabular_synthetic_data.openai import ( OPENAI_TEMPLATE,