diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 00000000..ea5f567a
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,116 @@
+name: pytests
+on:
+ push:
+ branches:
+ - master
+ paths:
+ - 'adala/**'
+ - '.github/workflows/tests.yml'
+ - 'requirements**'
+ tags-ignore:
+ - '**'
+ pull_request:
+ types:
+ - opened
+ - synchronize
+ - reopened
+ - ready_for_review
+ branches:
+ - master
+ - 'release/**'
+
+env:
+ CACHE_NAME_PREFIX: v1
+ OPENAI_API_KEY: test
+
+jobs:
+ run_pytest_ubuntu:
+ name: Ubuntu
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version:
+ - '3.8'
+ - '3.9'
+ - '3.10'
+ - '3.11'
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4.7.0
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - uses: actions/cache@v3.3.1
+ name: Configure pip cache
+ id: pip-cache
+ with:
+ path: ~/.cache/pip
+          key: ${{ env.CACHE_NAME_PREFIX }}-${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('**/requirements-test.txt') }}
+ restore-keys: |
+ ${{ env.CACHE_NAME_PREFIX }}-${{ runner.os }}-pip-${{ matrix.python-version }}-
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip setuptools
+ pip install -U pip
+ pip install -r requirements.txt -r tests/requirements-test.txt
+ pip install -e .
+
+ - name: Run functional tests
+ run: |
+ cd tests/
+ pytest --junitxml report.xml --cov=. -vv
+
+
+
+
+ run_pytests_windows:
+ name: Windows
+ runs-on: windows-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version:
+ - '3.8'
+ - '3.9'
+ - '3.10'
+ - '3.11'
+
+ steps:
+ - uses: hmarr/debug-action@v2.1.0
+
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.head_sha }}
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - uses: actions/cache@v3
+ name: Configure pip cache
+ with:
+ path: ~\AppData\Local\pip\Cache
+ key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/requirements.txt') }}-${{ hashFiles('**/requirements-test.txt') }}
+ restore-keys: |
+ ${{ runner.os }}-pip-${{ matrix.python-version }}-
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip setuptools
+ pip install --upgrade cython
+ if (Test-Path -Path '.\requirements.txt' -PathType Leaf)
+ {pip install -r requirements.txt}
+ if (Test-Path -Path '.\tests\requirements-test.txt' -PathType Leaf)
+ {pip install -r tests/requirements-test.txt}
+ pip install -e .
+
+ - name: Test with pytest
+ run: |
+ cd tests/
+ python -m pytest -vv
diff --git a/CONTRIBUTION.md b/CONTRIBUTION.md
new file mode 100644
index 00000000..e00341de
--- /dev/null
+++ b/CONTRIBUTION.md
@@ -0,0 +1,75 @@
+# Adala Project Contribution Guide: Agent and Skill Development
+
+Thank you for your interest in contributing to the Adala Project's agent development! The robustness and versatility of our system primarily stem from the diverse agents and skills we deploy. This guide focuses on agent-related contributions, highlighting the importance of domain and task specificity.
+
+## Areas of Contribution:
+
+### Diverse Skills Contributions:
+
+Adala welcomes agents equipped with a wide range of skills, each offering unique capabilities. From tasks such as classification, anomaly detection, and regression to specialized roles like sentiment analysis or recommendation systems, there's endless potential to broaden our agent spectrum. Skills designed for specific domains (like medical, finance, or nature) or tailored tasks within these areas can considerably amplify the system's efficacy.
+
+### Extending Skills:
+
+Start with the foundational Skill class and extend it to give Adala new skills. For reference, examine how `ClassificationSkill` is implemented.
+
+For illustration, here is a minimal sketch that builds on the existing `ClassificationSkill` rather than the low-level base class. The field names follow the README quickstart and are assumptions about the current API, not a definitive recipe:
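+
+```python
+# A hedged sketch, not the definitive API: it reuses ClassificationSkill, whose
+# constructor fields (name, instructions, labels, input_data_field) appear in
+# the README quickstart.
+from typing import List
+
+from adala.skills import ClassificationSkill
+
+
+class SpamDetectionSkill(ClassificationSkill):
+    """Flag incoming messages as spam or not spam."""
+
+    name: str = 'spam_detection'
+    instructions: str = 'Decide whether the message is unsolicited spam.'
+    labels: List[str] = ['Spam', 'Not spam']
+    input_data_field: str = 'text'
+```
+
+You can then pass the skill to an `Agent` exactly as in the README quickstart.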
+
+### Domain-Specific Skills
+
+Customize skills to particular domains, providing more profound insights and actionable feedback.
+
+As an illustration, the hedged sketch below preconfigures a classification skill for a medical triage domain; the field names again mirror the README quickstart and may need adjusting to the actual API:
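+
+```python
+# A hedged sketch assuming the same constructor fields as in the README
+# quickstart; adjust the names and labels to your domain.
+from typing import List
+
+from adala.skills import ClassificationSkill
+
+
+class MedicalTriageSkill(ClassificationSkill):
+    """Route free-text patient messages to the department that should handle them."""
+
+    name: str = 'medical_triage'
+    instructions: str = 'Classify the patient message by the department best suited to respond.'
+    labels: List[str] = ['Cardiology', 'Dermatology', 'General Practice', 'Emergency']
+    input_data_field: str = 'text'
+```
+
+Pair a skill like this with a domain-specific ground truth dataset so the agent can learn the conventions of that field.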
+
+#### Guidelines for New Skills:
+
+- Uniqueness: Focus on specificity. What unique problem does your skill resolve?
+- Integration: Ensure your skill aligns well with the existing Adala framework.
+- Documentation: Offer comprehensive documentation, usage instances for your agent, and a testing environment (with a ground truth dataset).
+- Testing: Incorporate both unit and integration tests to guarantee a seamless integration with the Adala system.
+
+### New Runtimes
+
+Introduce runtimes utilizing varying language models or even distinct model types for labeling tasks. Enhancing current implementations through performance optimization or new feature introduction is also encouraged.
+
+#### Adding a New Runtime:
+To introduce a new runtime, follow the structure of the `Runtime` abstract class in `adala/runtimes/base.py`. The sketch below is illustrative only: its `process_record` signature mirrors `LLMRuntime`, and the exact interface a skill expects from a runtime may differ.
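+
+```python
+# A hedged sketch under the assumption that runtimes expose a
+# process_record-style method like LLMRuntime does; the real abstract
+# interface may differ.
+from typing import Any, Dict, Optional
+
+from adala.runtimes.base import Runtime
+
+
+class EchoRuntime(Runtime):
+    """A toy runtime that returns inputs unchanged instead of querying an LLM."""
+
+    def init_runtime(self):
+        # Create API clients, load model weights, warm up caches, etc.
+        return self
+
+    def process_record(
+        self,
+        record: Dict[str, Any],
+        input_template: str,
+        output_template: str,
+        instructions: str,
+        extra_fields: Optional[Dict[str, Any]] = None,
+    ) -> Dict[str, Any]:
+        # A real runtime would render the templates, call a model and parse the
+        # response; here the record is returned as-is for demonstration.
+        return dict(record)
+```
+
+A runtime like this could then be registered on an agent, for example via `runtimes={'echo': EchoRuntime()}`.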
+
+### Environments
+
+The environment defines how Adala agents collect the feedback they learn from. For instance, you could create an environment that calls your phone through a Twilio integration and asks you to review each prediction. The hedged sketch below is a simplified variant that collects feedback from the console; its methods mirror the `Environment` base class in `adala/environments/base.py`:
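+
+```python
+# A hedged sketch: method names follow the Environment base class in
+# adala/environments/base.py, but the feedback flow here is illustrative only.
+from pydantic import Field
+
+from adala.datasets import Dataset, DataFrameDataset
+from adala.environments import Environment
+from adala.memories import ShortTermMemory
+from adala.skills.base import BaseSkill
+
+
+class ConsoleFeedbackEnvironment(Environment):
+    """Ask a human reviewer to confirm each prediction from the console."""
+
+    feedback_dataset: DataFrameDataset = Field(default_factory=DataFrameDataset)
+
+    def request_feedback(self, skill: BaseSkill, experience: ShortTermMemory):
+        # A real integration could place a phone call or send an SMS here
+        # (for example via Twilio) and wait for the reviewer's verdict.
+        for index, prediction in experience.predictions[skill.name].items():
+            verdict = input(f'Record {index}: is "{prediction}" correct? (y/n) ')
+            print(f'Recorded feedback for {index}: {verdict}')
+
+    def compare_to_ground_truth(self, skill: BaseSkill, experience: ShortTermMemory) -> ShortTermMemory:
+        # Merge the collected feedback into the experience here, for example by
+        # exact matching as BasicEnvironment does.
+        return experience
+
+    def as_dataset(self) -> Dataset:
+        return self.feedback_dataset
+
+    def save(self):
+        raise NotImplementedError
+
+    def restore(self):
+        raise NotImplementedError
+```
+
+An environment like this can be passed to `Agent(environment=...)` in place of `BasicEnvironment`.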
+
+### Roadmap Driven
+
+Contributions that align with the items in our roadmap, found in the main README, are not only welcome but greatly encouraged. Following the roadmap keeps all efforts in sync with the project's vision.
+
+## How to Contribute:
+
+- Fork the Repository: Create a fork of the Adala repository on your GitHub account.
+- Clone, Branch, and Develop: Clone your fork, create a new branch for your contribution, and start developing.
+- Test and Commit: After making changes, test them thoroughly. Once you are satisfied, commit with an informative message.
+- Push and Pull Request: Push your changes and open a pull request describing the value of your contribution.
+
+## Development Environment
+
+Adala uses [PDM](https://pdm.fming.dev/latest) to manage dependencies. To create an environment, [install PDM](https://pdm.fming.dev/latest/#recommended-installation-method) and run `pdm install` from the root of the repository. To activate the environment, run `source .venv/bin/activate` from the root of the repo after creating the environment.
+
+## Code of Conduct:
+While diverse contributions invigorate our project, it's paramount to sustain a harmonious and cooperative environment. Please adhere to our code of conduct.
+
+## Questions or Discussions:
+For inquiries or discussions concerning particular features, agents, or modifications, please initiate an issue. Your feedback propels the project's advancement.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..6ec15332
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2023 HumanSignal, Inc
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index c0e9ca76..f97be241 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,307 @@
-# ADALA
-ADALA: Autonomous Data Labeling Agent
+[![PyPI version](https://badge.fury.io/py/adala-pk-test.svg)](https://badge.fury.io/py/adala-pk-test)
+![GitHub](https://img.shields.io/github/license/HumanSignal/Adala)
+![GitHub Repo stars](https://img.shields.io/github/stars/HumanSignal/Adala)
+[![](https://img.shields.io/discord/1166330284300570624?label=Discord&logo=discord)](https://discord.gg/QBtgTbXTgU)
+
+
+
+Adala is an **A**utonomous **DA**ta (**L**abeling) **A**gent framework.
+
+Adala offers a robust framework for implementing agents specialized in data processing, with an emphasis on
+diverse data labeling tasks. These agents are autonomous, meaning they can independently acquire one or more skills
+through iterative learning. This learning process is influenced by their operating environment, observations, and
+reflections. Users define the environment by providing a ground truth dataset. Every agent learns and applies its skills
+in what we refer to as a "runtime", which is essentially a wrapper around an LLM.
+
+![Diagram of components](./docs/src/img/diagram.png "Diagram of components")
+
+
+
+## Why choose Adala?
+
+- **Reliable agents**: Agents are built upon a foundation of ground
+ truth data. This ensures consistent and trustworthy results, making Adala a
+ reliable choice for your data processing needs.
+
+- **Controllable output**: For every skill, you can configure the
+ desired output and set specific constraints with varying degrees of
+ flexibility. Whether you want strict adherence to particular
+ guidelines or more adaptive outputs based on the agent's learning,
+ Adala allows you to tailor results to your exact needs.
+
+- **Specialized in data processing**: While agents excel in diverse
+ data labeling tasks, they can be customized for a wide range of data
+ processing needs.
+
+- **Autonomous learning**: Adala agents aren't just automated;
+ they're intelligent. They iteratively and independently develop
+ skills based on environment, observations, and reflections.
+
+- **Flexible and extensible runtime**: Adala's runtime environment is
+ adaptable. A single skill can be deployed across multiple runtimes,
+ facilitating dynamic scenarios like the student/teacher
+  architecture. Moreover, the openness of the framework invites the
+ community to extend and tailor runtimes, ensuring continuous
+ evolution and adaptability to diverse needs.
+
+- **Easily customizable**: Quickly customize and develop agents to address
+ challenges specific to your needs, without facing a steep learning curve.
+
+## Who is Adala for?
+
+Adala is a versatile framework designed for individuals and professionals in the field of AI and machine learning. Here's who can benefit:
+
+- **AI engineers:** Architect and design AI agent systems with modular, interconnected skills. Build production-level agent systems, abstracting low-level ML to Adala and LLMs.
+- **Machine learning researchers:** Experiment with complex problem decomposition and causal reasoning.
+- **Data scientists:** Apply agents to preprocess and postprocess your data. Interact with Adala natively through Python notebooks when working with large DataFrames.
+- **Educators and students:** Use Adala as a teaching tool or as a base for advanced projects and research.
+
+While the roles highlighted above are central, Adala is designed to streamline and elevate the AI development journey
+for all enthusiasts, regardless of their specific niche in the field.
+
+## Installation
+
+Install Adala:
+
+```sh
+pip install adala
+```
+
+
+## Prerequisites
+
+Set OPENAI_API_KEY ([see instructions here](https://platform.openai.com/docs/quickstart/step-2-setup-your-api-key))
+
+```sh
+export OPENAI_API_KEY='your-openai-api-key'
+```
+
+## Quickstart
+
+In this example, we use Adala as a standalone library directly inside a Python notebook.
+
+Click [here](./examples/quickstart.ipynb) to see an extended quickstart example.
+
+```python
+import pandas as pd
+
+from adala.agents import Agent
+from adala.datasets import DataFrameDataset
+from adala.environments import BasicEnvironment
+from adala.skills import ClassificationSkill
+from adala.runtimes import OpenAIRuntime
+from rich import print
+
+# Train dataset
+ground_truth_df = pd.DataFrame([
+ ["It was the negative first impressions, and then it started working.", "Positive"],
+ ["Not loud enough and doesn't turn on like it should.", "Negative"],
+ ["I don't know what to say.", "Neutral"],
+ ["Manager was rude, but the most important that mic shows very flat frequency response.", "Positive"],
+ ["The phone doesn't seem to accept anything except CBR mp3s.", "Negative"],
+ ["I tried it before, I bought this device for my son.", "Neutral"],
+], columns=["text", "ground_truth"])
+
+# Test dataset
+predict_df = pd.DataFrame([
+ "All three broke within two months of use.",
+ "The device worked for a long time, can't say anything bad.",
+ "Just a random line of text."
+], columns=["text"])
+
+ground_truth_dataset = DataFrameDataset(df=ground_truth_df)
+predict_dataset = DataFrameDataset(df=predict_df)
+
+agent = Agent(
+ # connect to a dataset
+ environment=BasicEnvironment(
+ ground_truth_dataset=ground_truth_dataset,
+ ground_truth_column="ground_truth"
+ ),
+
+ # define a skill
+ skills=ClassificationSkill(
+ name='sentiment_classification',
+ instructions="Label text as subjective or objective.",
+ labels=["Positive", "Negative", "Neutral"],
+ input_data_field='text'
+ ),
+
+ # define all the different runtimes your skills may use
+ runtimes = {
+ # You can specify your OPENAI API KEY here via `OpenAIRuntime(..., api_key='your-api-key')`
+ 'openai': OpenAIRuntime(model='gpt-3.5-turbo-instruct'),
+ 'openai-gpt3': OpenAIRuntime(model='gpt-3.5-turbo'),
+ # 'openai-gpt4': OpenAIRuntime(model='gpt-4'),
+ },
+ default_runtime='openai',
+
+    # NOTE! If you don't have access to gpt-4, replace it with "openai-gpt3"
+ # default_teacher_runtime='openai-gpt4'
+)
+
+print(agent)
+print(agent.skills)
+
+agent.learn(learning_iterations=3, accuracy_threshold=0.95)
+
+print('\n=> Run tests ...')
+run = agent.apply_skills(predict_dataset)
+print('\n => Test results:')
+print(run)
+```
+
+### Available skills
+- [ClassificationSkill](./examples/classification_skill.ipynb): Classify text into a set of predefined labels.
+- [ClassificationSkillWithCoT](./examples/classification_skill_with_CoT.ipynb): Classify text into a set of predefined labels, using Chain-of-Thought reasoning.
+- [SummarizationSkill](./examples/summarization_skill.ipynb): Summarize text into a shorter text.
+- [QuestionAnsweringSkill](./examples/question_answering_skill.ipynb): Answer questions based on a given context.
+- [TranslationSkill](./examples/translation_skill.ipynb): Translate text from one language to another.
+- [TextGenerationSkill](./examples/text_generation_skill.ipynb): Generate text based on a given prompt.
+
+
+
+## Roadmap
+
+- [ ] Low-level skill management (e.g. `agent.get_skill("name")`)
+- [ ] Calculate and store top line Agent metrics (predictions created, runtime executions, learning loops, etc)
+- [ ] Create Named Entity Recognition Skill
+- [ ] Extend environment with one more example
+- [ ] Command-line utility (see the source of this README for an example)
+- [ ] REST API to interact with Adala
+- [ ] Multi-task learning (learn multiple skills at once)
+- [ ] Vision and multi-modal agent skills
+
+## Contributing to Adala
+
+Enhance skills, optimize runtimes, or pioneer new agent types. Whether you're
+crafting nuanced tasks, refining computational environments, or sculpting specialized agents for unique domains, your
+contributions will power Adala's evolution. Join us in shaping the future of intelligent systems and making Adala more
+versatile and impactful for users across the globe.
+
+Read more in the [contribution guide](./CONTRIBUTION.md).
+
+## Support
+
+Do you need help, or are you looking to engage with the community? Check out our [Discord channel](https://discord.gg/QBtgTbXTgU)!
+Whether you have questions, need clarification, or simply want to discuss topics related to the project, the Discord community is welcoming!
diff --git a/adala/__init__.py b/adala/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/adala/agents/__init__.py b/adala/agents/__init__.py
new file mode 100644
index 00000000..ebbc5c75
--- /dev/null
+++ b/adala/agents/__init__.py
@@ -0,0 +1 @@
+from .base import Agent
\ No newline at end of file
diff --git a/adala/agents/base.py b/adala/agents/base.py
new file mode 100644
index 00000000..334f4f1f
--- /dev/null
+++ b/adala/agents/base.py
@@ -0,0 +1,284 @@
+from pydantic import BaseModel, Field, SkipValidation, field_validator, model_validator, ValidationError
+from abc import ABC, abstractmethod
+from typing import Any, Optional, List, Dict, Union
+from adala.environments.base import Environment, BasicEnvironment
+from adala.datasets import Dataset, DataFrameDataset
+from adala.runtimes.base import Runtime, LLMRuntime, LLMRuntimeType, LLMRuntimeModelType
+from adala.runtimes.openai import OpenAIRuntime
+from adala.memories.base import ShortTermMemory, LongTermMemory
+from adala.skills.base import BaseSkill
+from adala.skills.skillset import SkillSet, LinearSkillSet
+from adala.utils.logs import print_dataframe, print_text, print_error
+from adala.utils.internal_data import InternalDataFrame
+
+
+class Agent(BaseModel, ABC):
+ """
+ Represents a customizable agent that can interact with environments,
+ employ skills, and leverage memory and runtimes.
+
+ Attributes:
+ environment (Union[Dataset, Environment]): The environment with which the agent interacts.
+ skills (Union[SkillSet, BaseSkill, List[BaseSkill], Dict[str, BaseSkill]]): The skills possessed by the agent.
+ memory (LongTermMemory, optional): The agent's long-term memory. Defaults to None.
+ runtimes (Dict[str, Runtime], optional): The runtimes available to the agent. Defaults to predefined runtimes.
+ default_runtime (str): The default runtime used by the agent. Defaults to 'openai'.
+ """
+
+ environment: Union[InternalDataFrame, Dataset, Environment] = Field(default_factory=DataFrameDataset)
+ skills: Union[SkillSet, BaseSkill, List[BaseSkill], Dict[str, BaseSkill]]
+
+ memory: LongTermMemory = Field(default=None)
+ runtimes: Optional[Dict[str, Runtime]] = Field(
+ default_factory=lambda: {
+ 'openai': OpenAIRuntime(model='gpt-3.5-turbo-instruct'),
+ # 'llama2': LLMRuntime(
+ # llm_runtime_type=LLMRuntimeModelType.Transformers,
+ # llm_params={
+ # 'model': 'meta-llama/Llama-2-7b',
+ # 'device': 'cuda:0',
+ # }
+ # )
+ }
+ )
+ teacher_runtimes: Optional[Dict[str, Runtime]] = Field(
+ default_factory=lambda: {
+ 'openai-gpt3': OpenAIRuntime(model='gpt-3.5-turbo'),
+ 'openai-gpt4': OpenAIRuntime(model='gpt-4')
+ }
+ )
+ default_runtime: str = 'openai'
+ default_teacher_runtime: str = 'openai-gpt3'
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def __rich__(self):
+ """
+ Returns a colorized and formatted representation of the Agent instance.
+
+ Returns:
+ str: A rich-formatted representation of the agent.
+ """
+
+ skill_names = ", ".join([skill.name for skill in self.skills.skills.values()])
+ runtime_names = ", ".join(self.runtimes.keys())
+
+ return (
+ f"[bold blue]Agent Instance[/bold blue]\n\n"
+ f"Environment: {self.environment.__class__.__name__}\n"
+ f"Skills: {skill_names}\n"
+ f"Runtimes: {runtime_names}\n"
+ f"Default Runtime: {self.default_runtime}\n"
+ f"Default Teacher Runtime: {self.default_teacher_runtime}"
+ )
+
+ @field_validator('environment')
+ def environment_validator(cls, v):
+ """
+ Validates and possibly transforms the environment attribute.
+
+ Args:
+ v (Union[Dataset, Environment]): The environment value to validate.
+
+ Returns:
+ Environment: The validated environment.
+ """
+ if isinstance(v, InternalDataFrame):
+ v = DataFrameDataset(df=v)
+ if isinstance(v, Dataset):
+ v = BasicEnvironment(dataset=v)
+ return v
+
+ @field_validator('skills')
+ def skills_validator(cls, v):
+ """
+ Validates and possibly transforms the skills attribute.
+
+ Args:
+ v (Union[SkillSet, BaseSkill, List[BaseSkill], Dict[str, BaseSkill]]): The skills value to validate.
+
+ Returns:
+ SkillSet: The validated set of skills.
+ """
+
+ if isinstance(v, SkillSet):
+ pass
+ elif isinstance(v, BaseSkill):
+ v = LinearSkillSet(skills={'skill_0': v})
+ elif isinstance(v, list):
+ v = LinearSkillSet(skills={f'skill_{i}': skill for i, skill in enumerate(v)})
+ elif isinstance(v, dict):
+ v = LinearSkillSet(skills=v)
+ return v
+
+ @model_validator(mode='after')
+ def verify_input_parameters(self):
+ def _raise_default_runtime_error(val, runtime, runtimes, default_value):
+ print_error(f"The Agent.{runtime} is set to {val}, "
+ f"but this runtime is not available in the list: {list(runtimes)}. "
+ f"Please choose one of the available runtimes and initialize the agent again, for example:\n\n"
+ f"agent = Agent(..., {runtime}='{default_value}')\n\n"
+ f"Make sure the default runtime is available in the list of runtimes. For example:\n\n"
+ f"agent = Agent(..., runtimes={{'{default_value}': OpenAIRuntime(model='gpt-4')}})\n\n")
+ raise ValueError(f"default runtime {val} not found in provided runtimes.")
+
+ if self.default_runtime not in self.runtimes:
+ _raise_default_runtime_error(self.default_runtime, 'default_runtime', self.runtimes, 'openai')
+ if self.default_teacher_runtime not in self.teacher_runtimes:
+ _raise_default_runtime_error(self.default_teacher_runtime, 'default_teacher_runtime', self.teacher_runtimes, 'openai-gpt4')
+ return self
+
+ def get_runtime(self, runtime: Optional[str] = None) -> Runtime:
+ """
+ Retrieves the specified runtime or the default runtime if none is specified.
+
+ Args:
+ runtime (str, optional): The name of the runtime to retrieve. Defaults to None.
+
+ Returns:
+ Runtime: The requested runtime.
+
+ Raises:
+ ValueError: If the specified runtime is not found.
+ """
+
+ if runtime is None:
+ runtime = self.default_runtime
+ if runtime not in self.runtimes:
+ raise ValueError(f'Runtime "{runtime}" not found.')
+ return self.runtimes[runtime]
+
+ def get_teacher_runtime(self, runtime: Optional[str] = None) -> Runtime:
+ """
+ Retrieves the specified teacher runtime or the default runtime if none is specified.
+
+ Args:
+ runtime (str, optional): The name of the runtime to retrieve. Defaults to None.
+
+ Returns:
+ Runtime: The requested runtime.
+
+ Raises:
+ ValueError: If the specified runtime is not found.
+ """
+
+ if runtime is None:
+ runtime = self.default_teacher_runtime
+ if runtime not in self.teacher_runtimes:
+ raise ValueError(f'Teacher Runtime "{runtime}" not found.')
+ return self.teacher_runtimes[runtime]
+
+ def apply_skills(
+ self,
+ dataset: Union[Dataset, InternalDataFrame],
+ runtime: Optional[Union[str, Runtime]] = None,
+ experience: Optional[ShortTermMemory] = None,
+ ) -> ShortTermMemory:
+ """
+ Applies the agent's skills to a given dataset using the specified runtime.
+
+ Args:
+ dataset (Dataset): The dataset to apply skills on.
+ runtime (str, optional): The runtime to use. Defaults to None.
+ experience (ShortTermMemory, optional): The agent's short-term memory. Defaults to None.
+
+ Returns:
+ ShortTermMemory: The short-term memory resulting from the application of skills.
+ """
+ runtime = runtime or self.default_runtime
+ if isinstance(dataset, InternalDataFrame):
+ dataset = DataFrameDataset(df=dataset)
+ if isinstance(runtime, str):
+ runtime = self.get_runtime(runtime=runtime)
+ return self.skills.apply(dataset=dataset, runtime=runtime, experience=experience)
+
+ def learn(
+ self,
+ learning_iterations: int = 3,
+ accuracy_threshold: float = 0.9,
+ update_skills: bool = True,
+ update_memory: bool = True,
+ request_environment_feedback: bool = True,
+ experience: Optional[ShortTermMemory] = None,
+ runtime: Optional[str] = None,
+ ) -> ShortTermMemory:
+ """
+ Enables the agent to learn and improve its skills based on interactions with its environment.
+
+ Args:
+ learning_iterations (int, optional): The number of iterations for learning. Defaults to 3.
+ accuracy_threshold (float, optional): The desired accuracy threshold to reach. Defaults to 0.9.
+ update_skills (bool, optional): Flag to determine if skills should be updated after learning. Defaults to True.
+ update_memory (bool, optional): Flag to determine if memory should be updated after learning. Defaults to True.
+ request_environment_feedback (bool, optional): Flag to determine if feedback should be requested from the environment. Defaults to True.
+ experience (ShortTermMemory, optional): Initial experience for the learning process. Defaults to None.
+ runtime (str, optional): The runtime to be used for the learning process. Defaults to None.
+
+ Returns:
+ ShortTermMemory: The short-term memory after the learning process.
+ """
+
+ runtime = self.get_runtime(runtime=runtime)
+ # TODO: support teacher runtime input, not default
+ teacher_runtime = self.get_teacher_runtime(runtime=self.default_teacher_runtime)
+
+ skills = self.skills.model_copy(deep=True)
+ dataset = self.environment.as_dataset()
+
+ # Apply agent skills to dataset and get experience with predictions
+ experience = self.apply_skills(dataset=dataset, runtime=runtime, experience=experience)
+
+        # Agent selects one skill to improve
+ learned_skill = skills.select_skill_to_improve(experience)
+
+        # Request feedback from the environment if necessary
+ if request_environment_feedback:
+ self.environment.request_feedback(learned_skill, experience)
+
+ for iteration in range(learning_iterations):
+ print_text(f'\n\n=> Iteration #{iteration}: Comparing to ground truth, analyzing and improving ...')
+
+ # 1. EVALUATION PHASE: Compare predictions to ground truth
+ experience = self.environment.compare_to_ground_truth(learned_skill, experience)
+ print_text(f'Comparing predictions to ground truth data ...')
+ print_dataframe(experience.evaluations)
+
+ # 2. ANALYSIS PHASE: Analyze evaluation experience, optionally use long term memory
+ print_text(f'Analyze evaluation experience ...')
+ experience = learned_skill.analyze(
+ experience=experience,
+ student_runtime=runtime,
+ teacher_runtime=teacher_runtime,
+ memory=self.memory
+ )
+ print_text(f'Number of errors: {len(experience.errors)}')
+
+ print_text(f'Accuracy = {experience.accuracy*100:0.2f}%', style='bold red')
+ if experience.accuracy >= accuracy_threshold:
+ print_text(f'Accuracy threshold reached ({experience.accuracy} >= {accuracy_threshold})')
+ break
+
+ # 3. IMPROVEMENT PHASE: Improve skills based on analysis
+ print_text(f"Improve \"{learned_skill.name}\" skill based on analysis ...")
+ experience = learned_skill.improve(
+ experience=experience,
+ runtime=teacher_runtime,
+ update_instructions=True
+ )
+ print_text(f'Updated instructions for skill "{learned_skill.name}":\n')
+ print_text(learned_skill.instructions, style='bold green')
+
+ # 4. RE-APPLY PHASE: Re-apply skills to dataset
+ print_text(f"Re-apply {learned_skill.name} skill to dataset ...")
+ experience = learned_skill.apply(dataset, runtime, experience=experience)
+
+ # Update skills and memory based on experience
+ if update_skills:
+ self.skills = skills
+
+ if self.memory and update_memory:
+ self.memory.remember(experience, self.skills)
+
+        print_text('Training is done!')
+ return experience
diff --git a/adala/datasets/__init__.py b/adala/datasets/__init__.py
new file mode 100644
index 00000000..2783af7e
--- /dev/null
+++ b/adala/datasets/__init__.py
@@ -0,0 +1,3 @@
+from .base import Dataset, InternalDataFrame
+from .dataframe import DataFrameDataset
+# from .label_studio import LabelStudioDataset, LabelStudioFileDataset
diff --git a/adala/datasets/base.py b/adala/datasets/base.py
new file mode 100644
index 00000000..a85dcf6f
--- /dev/null
+++ b/adala/datasets/base.py
@@ -0,0 +1,82 @@
+from abc import ABC, abstractmethod
+from pydantic import BaseModel, field_validator
+from typing import List, Optional, Any, Dict, Union
+
+from adala.utils.internal_data import InternalDataFrame
+
+
+class Dataset(BaseModel, ABC):
+ """
+ Abstract base class representing a dataset.
+
+ Provides methods to interact with and obtain information about datasets.
+ Concrete implementations should provide functionality for batch iteration,
+ getting dataset size, and displaying dataset information.
+ """
+
+ @abstractmethod
+ def batch_iterator(self, batch_size: int = 100) -> InternalDataFrame:
+ """
+ Yields batches of data records from the dataset.
+
+ Args:
+ batch_size (int, optional): Size of each batch to be yielded. Defaults to 100.
+
+ Returns:
+ InternalDataFrame: A data frame containing a batch of records.
+ """
+
+ @abstractmethod
+ def __len__(self) -> int:
+ """
+ Provides the number of records in the dataset.
+
+ Returns:
+ int: Total number of records in the dataset.
+ """
+
+ @abstractmethod
+ def info(self) -> None:
+ """
+ Displays information about the dataset.
+ """
+
+
+class BlankDataset(Dataset):
+ """
+ Represents an empty dataset with no records.
+
+ This class can be used in situations where a dataset is required,
+ but no actual data is available or needed.
+ All methods return defaults representing an empty state.
+ """
+
+ def batch_iterator(self, batch_size: int = 100) -> InternalDataFrame:
+ """
+ Yields an empty data frame as there are no records in a blank dataset.
+
+ Args:
+ batch_size (int, optional): This argument is ignored for BlankDataset. Defaults to 100.
+
+ Returns:
+ InternalDataFrame: An empty data frame.
+ """
+
+ return InternalDataFrame()
+
+ def __len__(self) -> int:
+ """
+ Provides the number of records in the blank dataset (which is always 0).
+
+ Returns:
+ int: Total number of records in the dataset (0 for BlankDataset).
+ """
+
+ return 0
+
+ def info(self) -> None:
+ """
+ Displays information about the blank dataset.
+ """
+
+ print('Blank dataset')
diff --git a/adala/datasets/dataframe.py b/adala/datasets/dataframe.py
new file mode 100644
index 00000000..e7dae40f
--- /dev/null
+++ b/adala/datasets/dataframe.py
@@ -0,0 +1,53 @@
+from typing import Iterable
+from .base import Dataset
+from adala.utils.internal_data import InternalDataFrame
+from pydantic import Field
+
+
+class DataFrameDataset(Dataset):
+ """
+ Represents a dataset backed by an internal data frame.
+
+ Provides methods to interact with and obtain information about the dataset stored
+ as an internal data frame. This class wraps around `InternalDataFrame` to make it
+ compatible with the dataset abstraction.
+
+ Attributes:
+ df (InternalDataFrame): The internal data frame storing the dataset.
+ """
+
+ df: InternalDataFrame = Field(default_factory=InternalDataFrame)
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def __len__(self):
+ """
+ Provides the number of records in the dataset.
+
+ Returns:
+ int: Total number of records in the dataset.
+ """
+
+ return len(self.df)
+
+ def batch_iterator(self, batch_size: int = 100) -> Iterable[InternalDataFrame]:
+ """
+ Yields batches of data records from the dataset.
+
+ Args:
+ batch_size (int, optional): Size of each batch to be yielded. Defaults to 100.
+
+ Yields:
+ Iterable[InternalDataFrame]: An iterator that yields data frames containing batches of records.
+ """
+
+ for i in range(0, len(self.df), batch_size):
+ yield self.df.iloc[i:i+batch_size]
+
+ def info(self) -> None:
+ """
+ Displays information (statistical description) about the dataset.
+ """
+
+ print(self.df.describe())
diff --git a/adala/datasets/label_studio.py b/adala/datasets/label_studio.py
new file mode 100644
index 00000000..20eccb62
--- /dev/null
+++ b/adala/datasets/label_studio.py
@@ -0,0 +1,129 @@
+import json
+import label_studio_sdk
+
+from pprint import pprint
+from .base import Dataset, InternalDataFrame
+from pydantic import model_validator, SkipValidation
+from label_studio_sdk.project import LabelStudioException, Project
+from typing import Optional, List, Dict
+
+
+class LabelStudioFormatMixin:
+
+ def _tasks_to_df(
+ self,
+ tasks,
+ include_annotations: bool = True,
+ only_annotated: bool = False,
+ ground_truth_column: str = 'ground_truth'
+ ):
+ indices, records = [], []
+ for task in tasks:
+ record = task['data']
+ if only_annotated and not task['annotations']:
+ continue
+
+ if (only_annotated or include_annotations) and task['annotations']:
+ # TODO: expand more complex annotations
+ if len(task['annotations']) > 1:
+ raise NotImplementedError('Multiple annotations are not supported yet')
+ annotation = task['annotations'][0]
+ annotation_type = annotation['result'][0]['type']
+ if annotation_type == 'textarea':
+ annotation_type = 'text'
+ if len(annotation['result']) > 1:
+ raise NotImplementedError('Multiple results per annotation are not supported yet')
+ label = annotation['result'][0]['value'][annotation_type]
+ if isinstance(label, list):
+ if len(label) == 1:
+ label = label[0]
+ else:
+ label = ','.join(sorted(label))
+ else:
+ label = str(label)
+ record[ground_truth_column] = label
+
+ index = task['id']
+ records.append(record)
+ indices.append(index)
+ return InternalDataFrame(records, index=indices)
+
+
+class LabelStudioDataset(Dataset, LabelStudioFormatMixin):
+
+ label_studio_url: str
+ label_studio_api_key: str
+ label_studio_project_id: int
+
+ ground_truth_column: str = 'ground_truth'
+
+ _project_client: SkipValidation[Project] = None
+
+ @model_validator(mode='after')
+ def init_client(self):
+ if self._project_client is None:
+ client = label_studio_sdk.Client(
+ url=self.label_studio_url,
+ api_key=self.label_studio_api_key
+ )
+ self._project_client = client.get_project(id=self.label_studio_project_id)
+ return self
+
+ def get_project_info(self):
+ return self._project_client.get_params()
+
+ def info(self) -> None:
+ pprint(self.get_project_info())
+
+ def __len__(self):
+ info = self.get_project_info()
+ return info['task_number']
+
+ def batch_iterator(self, batch_size: int = 100) -> InternalDataFrame:
+ page = 1
+ while True:
+ try:
+ data = self._project_client.get_paginated_tasks(page=page, page_size=batch_size)
+ yield self._tasks_to_df(data['tasks'], include_annotations=False)
+ page += 1
+ # we'll get 404 from API on empty page
+ except LabelStudioException as e:
+ break
+
+ def get_ground_truth(self, batch: Optional[InternalDataFrame] = None) -> InternalDataFrame:
+ if batch is None:
+ labeled_tasks = self._project_client.get_labeled_tasks()
+ gt = self._tasks_to_df(labeled_tasks, only_annotated=True, ground_truth_column='ground_truth')
+ return gt
+ else:
+ # TODO: not the most effective method - better to send subset of indices to LS API
+ labeled_tasks = self._project_client.get_labeled_tasks()
+ gt = self._tasks_to_df(labeled_tasks, only_annotated=True, ground_truth_column='ground_truth')
+ return gt[gt.index.isin(batch.index)]
+
+
+class LabelStudioFileDataset(Dataset, LabelStudioFormatMixin):
+ label_studio_file: str
+ ground_truth_column: str = 'ground_truth'
+
+ _data: List[Dict] = None
+
+ @model_validator(mode='after')
+ def load_data(self):
+ with open(self.label_studio_file) as f:
+ self._data = json.load(f)
+ return self
+
+ def batch_iterator(self, batch_size: int = 100) -> InternalDataFrame:
+ for i in range(0, len(self._data), batch_size):
+ batch = self._data[i:i+batch_size]
+ yield self._tasks_to_df(batch, include_annotations=False)
+
+ def get_ground_truth(self, batch: Optional[InternalDataFrame]) -> InternalDataFrame:
+ return self._tasks_to_df(self._data, only_annotated=True, ground_truth_column='ground_truth')
+
+ def __len__(self):
+ return len(self._data)
+
+ def info(self) -> None:
+ print(f'Total Label Studio tasks loaded: {len(self)}')
diff --git a/adala/environments/__init__.py b/adala/environments/__init__.py
new file mode 100644
index 00000000..7448fa4d
--- /dev/null
+++ b/adala/environments/__init__.py
@@ -0,0 +1 @@
+from .base import Environment, BasicEnvironment
\ No newline at end of file
diff --git a/adala/environments/base.py b/adala/environments/base.py
new file mode 100644
index 00000000..4f4a3f0c
--- /dev/null
+++ b/adala/environments/base.py
@@ -0,0 +1,126 @@
+from pydantic import BaseModel, dataclasses, Field, field_validator
+from abc import ABC, abstractmethod
+from typing import Any, Optional, Dict, Union, Callable
+
+from adala.utils.internal_data import InternalDataFrame, InternalDataFrameConcat
+from adala.skills.base import BaseSkill
+from adala.memories.base import ShortTermMemory
+from adala.datasets import Dataset, DataFrameDataset
+
+
+class Environment(BaseModel, ABC):
+ """Abstract base class for environments.
+
+ The environment provides a mechanism to obtain ground truth information from raw data and predictions,
+ and also facilitates comparison of ground truth with predictions.
+
+ Attributes:
+ Config (class): Configuration for the environment class, allows arbitrary types.
+ """
+
+ @abstractmethod
+ def request_feedback(self, skill: BaseSkill, experience: ShortTermMemory):
+ """Request user feedback using predictions and update internal ground truth set."""
+
+ @abstractmethod
+ def compare_to_ground_truth(self, skill: BaseSkill, experience: ShortTermMemory) -> ShortTermMemory:
+ """Compare predictions with ground truth and return the results."""
+
+ @abstractmethod
+ def as_dataset(self) -> Dataset:
+ """Convert the environment to a dataset."""
+
+ @abstractmethod
+ def save(self):
+ """Persist the state of the environment."""
+
+ @abstractmethod
+ def restore(self):
+ """Retrieve and set the state of the environment."""
+
+ class Config:
+ arbitrary_types_allowed = True
+
+
+class BasicEnvironment(Environment):
+ """Basic environment implementation.
+
+ This environment assumes the ground truth is provided explicitly with the input data.
+ For comparison with ground truth, exact matching is used.
+
+ Attributes:
+ ground_truth_dataset (DataFrameDataset): Dataset containing the ground truth data.
+ Defaults to an empty DataFrameDataset.
+ ground_truth_column (str): Name of the column containing ground truth in the dataset.
+ Defaults to 'ground_truth'.
+ _prediction_column (str): Name of the column containing predictions.
+
+ """
+
+ ground_truth_dataset: Union[InternalDataFrame, DataFrameDataset] = Field(default_factory=DataFrameDataset)
+ ground_truth_column: str = 'ground_truth'
+
+ _prediction_column: str
+
+ @field_validator('ground_truth_dataset')
+ def _validate_ground_truth_dataset(cls, v):
+ if isinstance(v, InternalDataFrame):
+ return DataFrameDataset(df=v)
+ return v
+
+ def request_feedback(self, skill: BaseSkill, experience: ShortTermMemory):
+ """In the BasicEnvironment, ground truth is already provided with the input data."""
+
+ def compare_to_ground_truth(self, skill: BaseSkill, experience: ShortTermMemory) -> ShortTermMemory:
+ """Compare the predictions with the ground truth using exact matching.
+
+ Args:
+ skill (BaseSkill): The skill being evaluated.
+ experience (ShortTermMemory): The experience memory containing predictions.
+
+ Returns:
+ ShortTermMemory: Updated memory containing evaluation results against ground truth.
+ """
+
+ experience = experience.model_copy()
+
+ gt = self.ground_truth_dataset.df[self.ground_truth_column]
+ pred = experience.predictions
+ # select
+ gt = gt[gt.index.isin(pred.index)]
+ if gt.empty:
+ # return empty memory
+ return experience
+
+ gt = gt.to_frame(self.ground_truth_column)
+
+ # compare ground truth with predictions using exact matching
+ match_column_name = f'{self.ground_truth_column}__x__{skill.name}'
+ evaluations = InternalDataFrameConcat([
+ pred,
+ (gt[self.ground_truth_column] == pred[skill.name]).rename(match_column_name)
+ ], axis=1)
+ experience.evaluations = evaluations
+ # remember the last column names used in evaluations
+ experience.ground_truth_column_name = self.ground_truth_column
+ experience.match_column_name = match_column_name
+ return experience
+
+ def as_dataset(self) -> Dataset:
+ """Return the ground truth dataset.
+
+ Returns:
+ Dataset: The dataset containing ground truth data.
+ """
+
+ return self.ground_truth_dataset
+
+ def save(self):
+ """Save method for BasicEnvironment. Not implemented."""
+
+ raise NotImplementedError
+
+ def restore(self):
+ """Restore method for BasicEnvironment. Not implemented."""
+
+ raise NotImplementedError
diff --git a/adala/memories/__init__.py b/adala/memories/__init__.py
new file mode 100644
index 00000000..8f218bff
--- /dev/null
+++ b/adala/memories/__init__.py
@@ -0,0 +1,2 @@
+from .file_memory import FileMemory
+from .base import ShortTermMemory, LongTermMemory
\ No newline at end of file
diff --git a/adala/memories/base.py b/adala/memories/base.py
new file mode 100644
index 00000000..822f9ddf
--- /dev/null
+++ b/adala/memories/base.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+from abc import ABC, abstractmethod
+from typing import Any, Optional, TYPE_CHECKING
+
+from pydantic import BaseModel
+from adala.datasets.base import Dataset, InternalDataFrame
+from rich import print
+
+if TYPE_CHECKING:
+ from adala.skills.skillset import SkillSet
+
+
+class ShortTermMemory(BaseModel):
+ """
+ Base class for short term memory storage
+ """
+ dataset: Dataset = None
+ predictions: InternalDataFrame = None
+ evaluations: InternalDataFrame = None
+ ground_truth_column_name: str = None
+ match_column_name: str = None
+ errors: InternalDataFrame = None
+ accuracy: float = None
+ initial_instructions: str = None
+ updated_instructions: str = None
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def reset(self):
+ self.predictions = None
+ self.evaluations = None
+ self.errors = None
+ self.accuracy = None
+ self.initial_instructions = None
+ self.updated_instructions = None
+
+ def __rich__(self):
+ text = '[bold blue]Agent Experience:[/bold blue]\n\n'
+ if self.predictions is not None:
+ text += f'\n[bold]Predictions[/bold]\n{self.predictions}'
+ if self.evaluations is not None:
+ text += f'\n[bold]Evaluations[/bold]\n{self.evaluations}'
+ if self.errors is not None:
+ text += f'\n[bold]Errors[/bold]\n{self.errors}'
+ if self.accuracy is not None:
+ text += f'\n[bold]Accuracy[/bold]\n{self.accuracy}'
+ if self.initial_instructions is not None:
+ text += f'\n[bold]Initial Instructions[/bold]\n{self.initial_instructions}'
+ if self.updated_instructions is not None:
+ text += f'\n[bold]Updated Instructions[/bold]\n{self.updated_instructions}'
+ return text
+
+ def display(self):
+ print(self)
+
+
+class LongTermMemory(BaseModel, ABC):
+
+ """
+ Base class for long-term memories.
+ Long-term memories are used to store acquired knowledge and can be shared between agents.
+ """
+
+ @abstractmethod
+ def remember(self, experience: ShortTermMemory, skills: SkillSet):
+ """
+ Base method for remembering experiences in long term memory.
+ """
+
+ @abstractmethod
+ def retrieve(self, observations: ShortTermMemory) -> ShortTermMemory:
+ """
+ Base method for retrieving past experiences from long term memory, based on current observations
+ """
diff --git a/adala/memories/file_memory.py b/adala/memories/file_memory.py
new file mode 100644
index 00000000..f2972667
--- /dev/null
+++ b/adala/memories/file_memory.py
@@ -0,0 +1,21 @@
+from .base import LongTermMemory, ShortTermMemory
+from typing import Any
+
+
+class FileMemory(LongTermMemory):
+
+ filepath: str
+
+    def remember(self, experience: ShortTermMemory, skills: Any):
+ """
+ Serialize experience in JSON and append to file
+ """
+ experience_json = experience.model_dump_json()
+ with open(self.filepath, 'a') as f:
+ f.write(experience_json + '\n')
+
+ def retrieve(self, observations: ShortTermMemory) -> ShortTermMemory:
+ """
+ Retrieve experience from file
+ """
+ raise NotImplementedError
diff --git a/adala/runtimes/__init__.py b/adala/runtimes/__init__.py
new file mode 100644
index 00000000..807e8176
--- /dev/null
+++ b/adala/runtimes/__init__.py
@@ -0,0 +1,2 @@
+from .base import Runtime, LLMRuntime, LLMRuntimeModelType
+from .openai import OpenAIRuntime
diff --git a/adala/runtimes/base.py b/adala/runtimes/base.py
new file mode 100644
index 00000000..641c7449
--- /dev/null
+++ b/adala/runtimes/base.py
@@ -0,0 +1,301 @@
+import enum
+import guidance
+import re
+
+from tqdm import tqdm
+from abc import ABC, abstractmethod
+from pydantic import BaseModel, model_validator
+from typing import List, Dict, Optional, Tuple, Any
+from adala.datasets.base import InternalDataFrame
+
+tqdm.pandas()
+
+
+class Runtime(BaseModel, ABC):
+ """
+ Base class representing a generic runtime environment.
+
+ Attributes:
+ verbose (bool): Flag indicating if runtime outputs should be verbose. Defaults to False.
+ """
+ verbose: bool = False
+
+ @model_validator(mode='after')
+ def init_runtime(self):
+ """Initializes the runtime.
+
+ This method should be used to validate and potentially initialize the runtime instance.
+
+ Returns:
+ Runtime: The initialized runtime instance.
+ """
+ return self
+
+
+class LLMRuntimeType(enum.Enum):
+ STUDENT = 'student'
+ TEACHER = 'teacher'
+
+
+class LLMRuntimeModelType(enum.Enum):
+ """Enumeration for LLM runtime model types."""
+ OpenAI = 'OpenAI'
+ Transformers = 'Transformers'
+
+
+class LLMRuntime(Runtime):
+ """
+ Class representing an LLM runtime environment.
+
+ Attributes:
+ llm_runtime_type (LLMRuntimeModelType): Type of the LLM runtime. Defaults to OpenAI.
+ llm_params (Dict[str, str]): Parameters for the LLM runtime. Defaults to a basic GPT-3.5 configuration.
+
+ _llm: Internal instance for the LLM model. Initialized in `init_runtime`.
+ _program: Program instance used for guidance. Initialized in `init_runtime`.
+ _llm_template (str): Template string for LLM guidance.
+ """
+ llm_runtime_type: LLMRuntimeType = LLMRuntimeType.STUDENT
+ llm_runtime_model_type: LLMRuntimeModelType = LLMRuntimeModelType.OpenAI
+ llm_params: Dict[str, str] = {
+ 'model': 'gpt-3.5-turbo-instruct',
+ # 'max_tokens': 10,
+ # 'temperature': 0,
+ }
+ _llm = None
+ _program = None
+ # do not override this template
+ _llm_template: str = '''\
+{{>instructions_program}}
+
+{{>input_program}}
+{{>output_program}}'''
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def _create_program(self):
+ # create an LLM instance
+ if self.llm_runtime_model_type.value == LLMRuntimeModelType.OpenAI.value:
+ self._llm = guidance.llms.OpenAI(**self.llm_params)
+ elif self.llm_runtime_model_type.value == LLMRuntimeModelType.Transformers.value:
+ self._llm = guidance.llms.Transformers(**self.llm_params)
+ else:
+ raise NotImplementedError(f'LLM runtime type {self.llm_runtime_model_type} is not implemented.')
+ self._program = guidance(self._llm_template, llm=self._llm, silent=not self.verbose)
+
+ def init_runtime(self):
+ """Initializes the LLM runtime environment.
+
+ Creates an LLM instance based on the runtime type and parameters.
+
+ Returns:
+ LLMRuntime: Initialized runtime instance.
+ """
+ self._create_program()
+ return self
+
+ def get_outputs(self, output_template: str) -> List[str]:
+ """Extracts output fields from the output template.
+
+ Args:
+ output_template (str): The template string to extract output fields from.
+
+ Returns:
+ List[str]: List of extracted output fields.
+ """
+ # search for all occurrences of {{...'output'...}}
+ # TODO: this is a very naive regex implementation - likely to fail in many cases
+ outputs = re.findall(r'\'(.*?)\'', output_template)
+ return outputs
+
+ def _process_record(
+ self,
+ record,
+ program,
+ extra_fields,
+ outputs=None
+ ):
+ """Processes a single record using the guidance program.
+
+ Args:
+ record (dict or InternalDataFrame): The record to be processed.
+ program (callable): The guidance program for processing.
+ extra_fields (dict, optional): Additional fields to include in the processed record.
+ outputs (list of str, optional): Specific output fields to extract from the result.
+
+ Returns:
+ dict: Processed output for the record.
+ """
+
+ if not isinstance(record, dict):
+ record = record.to_dict()
+ else:
+ record = record.copy()
+ verified_input = record
+ # exclude guidance parameter from input
+ if 'text' in verified_input:
+ verified_input['text_'] = verified_input['text']
+ del verified_input['text']
+
+ verified_input.update(extra_fields or {})
+ result = program(
+ silent=not self.verbose,
+ **verified_input
+ )
+ if outputs is None:
+ verified_output = {'': str(result)}
+ else:
+ verified_output = {field: result[field] for field in outputs}
+
+ return verified_output
+
+ def get_input_program(self, input_template):
+ """Generates an input program from the provided template.
+
+ Args:
+ input_template (str): Template to generate the input program.
+
+ Returns:
+ callable: The generated input program.
+ """
+
+ # fix input template in case "text" is present there - there might be other parameter names as well...
+ fixed_input_template = input_template
+ if '{{text}}' in fixed_input_template:
+ fixed_input_template = fixed_input_template.replace('{{text}}', '{{text_}}')
+ input_program = guidance(fixed_input_template, llm=self._llm, silent=not self.verbose)
+ return input_program
+
+ def get_output_program(self, output_template):
+ """Generates an output program from the provided template.
+
+ Args:
+ output_template (str): Template to generate the output program.
+
+ Returns:
+ callable: The generated output program.
+ """
+
+ return guidance(output_template, llm=self._llm)
+
+ def get_instructions_program(self, instructions):
+ """Generates an instructions program from the provided template.
+
+ Args:
+ instructions (str): The instructions to generate the program.
+
+ Returns:
+ callable: The generated instructions program.
+ """
+
+ return guidance(instructions, llm=self._llm)
+
+ def process_record(
+ self,
+ record: Dict[str, Any],
+ input_template: str,
+ output_template: str,
+ instructions: str,
+ extra_fields: Optional[Dict[str, Any]] = None,
+ ) -> Dict[str, Any]:
+ """Processes a record using the provided templates and instructions.
+
+ Args:
+ record (Dict[str, Any]): The record data to be processed.
+ input_template (str): Template for input processing.
+ output_template (str): Template for output processing.
+ instructions (str): Instructions for guidance.
+ extra_fields (Dict[str, Any], optional): Additional fields to include during processing.
+
+ Returns:
+ Dict[str, Any]: The processed record.
+ """
+
+ outputs = self.get_outputs(output_template)
+
+ input = record.copy()
+ input.update({
+ 'input_program': self.get_input_program(input_template),
+ 'output_program': self.get_output_program(output_template),
+ 'instructions_program': self.get_instructions_program(instructions),
+ })
+ output = self._process_record(
+ record=input,
+ program=self._program,
+ outputs=outputs,
+ extra_fields=extra_fields
+ )
+ return output
+
+ def process_batch(
+ self,
+ batch: InternalDataFrame,
+ input_template: str,
+ output_template: str,
+ instructions: str,
+ extra_fields: Optional[Dict[str, Any]] = None,
+ ) -> InternalDataFrame:
+ """Processes a batch of records using the provided templates and instructions.
+
+ Args:
+ batch (InternalDataFrame): The batch of records to be processed.
+ input_template (str): Template for input processing.
+ output_template (str): Template for output processing.
+ instructions (str): Instructions for guidance.
+ extra_fields (Dict[str, Any], optional): Additional fields to include during batch processing.
+
+ Returns:
+ InternalDataFrame: The processed batch of records.
+ """
+
+ outputs = self.get_outputs(output_template)
+
+ extra_fields = extra_fields or {}
+ # copy extra fields to avoid modification of the original dict
+ extra_fields = extra_fields.copy()
+ # TODO: it's not an efficient way to initialize the programs here - they should be created once
+ extra_fields.update({
+ 'input_program': self.get_input_program(input_template),
+ 'output_program': self.get_output_program(output_template),
+ 'instructions_program': self.get_instructions_program(instructions),
+ })
+ output = batch.progress_apply(
+ self._process_record,
+ axis=1,
+ result_type='expand',
+ program=self._program,
+ outputs=outputs,
+ extra_fields=extra_fields
+ )
+ return output
+
+ def process_batch_inputs(
+ self,
+ batch: InternalDataFrame,
+ input_template: str,
+ extra_fields: Optional[Dict[str, Any]] = None,
+ ) -> InternalDataFrame:
+ """Processes inputs for a batch of records using the provided input template.
+
+ Args:
+ batch (InternalDataFrame): The batch of records for input processing.
+ input_template (str): The template for input processing.
+ extra_fields (Dict[str, Any], optional): Additional fields to include during input processing.
+
+ Returns:
+ InternalDataFrame: The processed inputs for the batch of records.
+ """
+
+ output = batch.progress_apply(
+ self._process_record,
+ axis=1,
+ result_type='expand',
+ program=self.get_input_program(input_template),
+ extra_fields=extra_fields or {}
+ )
+ return output
+
+
+class CodeRuntime(Runtime):
+ """Base class representing a runtime designed for executing code."""
diff --git a/adala/runtimes/openai.py b/adala/runtimes/openai.py
new file mode 100644
index 00000000..94c308cd
--- /dev/null
+++ b/adala/runtimes/openai.py
@@ -0,0 +1,74 @@
+import os
+import openai
+from pydantic import Field
+from typing import Optional, Dict
+from .base import LLMRuntime, LLMRuntimeType, LLMRuntimeModelType
+from adala.utils.logs import print_error
+
+
+class OpenAIRuntime(LLMRuntime):
+ """Runtime class specifically designed for OpenAI models.
+
+ This class is tailored to use OpenAI models, particularly GPT models.
+ It inherits from the `LLMRuntime` class and thus can utilize its functionalities but specializes
+ for the OpenAI ecosystem.
+
+ Attributes:
+ api_key (str): The API key required to access OpenAI's API.
+ gpt_model_name (str): Name of the GPT model (set via the 'model' alias). Defaults to 'gpt-3.5-turbo-instruct'.
+ temperature (float): Sampling temperature for the GPT model's output.
+ A higher value makes output more random, while a lower value makes it more deterministic.
+ Defaults to 0.0.
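+
+ Example:
+ An illustrative sketch; it assumes a valid key is available through the
+ OPENAI_API_KEY environment variable (or is passed via `api_key`):
+
+ runtime = OpenAIRuntime(model='gpt-3.5-turbo-instruct', temperature=0.0)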
+ """
+
+ api_key: Optional[str] = None
+ gpt_model_name: Optional[str] = Field(default='gpt-3.5-turbo-instruct', alias='model')
+ temperature: Optional[float] = 0.0
+
+ def _check_api_key(self):
+ if self.api_key:
+ return
+ self.api_key = os.getenv('OPENAI_API_KEY')
+ if not self.api_key:
+ print_error(
+ 'OpenAI API key is not provided. Please set the OPENAI_API_KEY environment variable:\n\n'
+ 'export OPENAI_API_KEY=your-openai-api-key\n\n'
+ 'or set the `api_key` attribute of the `OpenAIRuntime` python class:\n\n'
+ f'{self.__class__.__name__}(..., api_key="your-openai-api-key")\n\n'
+ f'Read more about OpenAI API keys at https://platform.openai.com/docs/quickstart/step-2-setup-your-api-key')
+ raise ValueError('OpenAI API key is not provided.')
+
+ def _check_model_availability(self):
+ models = openai.Model.list(api_key=self.api_key)
+ models = set(model['id'] for model in models['data'])
+ if self.gpt_model_name not in models:
+ print_error(
+ f'Requested model "{self.gpt_model_name}" is not available in your OpenAI account. '
+ f'Available models are: {models}\n\n'
+ f'Try to change the runtime settings for {self.__class__.__name__}, for example:\n\n'
+ f'{self.__class__.__name__}(..., model="gpt-3.5-turbo")\n\n'
+ )
+ raise ValueError(f'Requested model {self.gpt_model_name} is not available in your OpenAI account.')
+
+ def init_runtime(self):
+ self._check_api_key()
+ self._check_model_availability()
+
+ student_models = {'gpt-3.5-turbo-instruct', 'text-davinci-003'}
+ teacher_models = {'gpt-4', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k'}
+
+ if self.gpt_model_name in student_models:
+ self.llm_runtime_type = LLMRuntimeType.STUDENT
+ elif self.gpt_model_name in teacher_models:
+ self.llm_runtime_type = LLMRuntimeType.TEACHER
+ else:
+ raise NotImplementedError(f'Not supported model: {self.gpt_model_name}.')
+
+ self.llm_runtime_model_type = LLMRuntimeModelType.OpenAI
+ self.llm_params = {
+ 'model': self.gpt_model_name,
+ 'temperature': self.temperature,
+ 'api_key': self.api_key
+ }
+ self._create_program()
+ return self
diff --git a/adala/skills/__init__.py b/adala/skills/__init__.py
new file mode 100644
index 00000000..2b1c62d5
--- /dev/null
+++ b/adala/skills/__init__.py
@@ -0,0 +1,6 @@
+from .labeling.classification import LLMSkill, ClassificationSkill, ClassificationSkillWithCoT
+from .labeling.sequence_labeling import SequenceLabelingSkill
+from .generation.base import TextGenerationSkill
+from .generation.qa import QuestionAnsweringSkill
+from .generation.summarization import SummarizationSkill
+
diff --git a/adala/skills/base.py b/adala/skills/base.py
new file mode 100644
index 00000000..c4404ff8
--- /dev/null
+++ b/adala/skills/base.py
@@ -0,0 +1,375 @@
+import pandas as pd
+
+from abc import ABC, abstractmethod
+from pydantic import BaseModel, Field, model_validator
+from typing import Any, Dict, List, Optional
+
+from adala.runtimes.base import LLMRuntime, Runtime
+from adala.datasets.base import Dataset
+from adala.memories.base import ShortTermMemory, LongTermMemory
+from adala.utils.internal_data import InternalDataFrame, InternalDataFrameConcat
+from adala.utils.logs import print_error
+
+
+class BaseSkill(BaseModel, ABC):
+ """
+ An abstract base class representing a skill. It provides the common attributes
+ and methods shared by all skill-based operations.
+ """
+ name: str = Field(
+ title='Skill name',
+ description='Unique name of the skill',
+ default='',
+ examples=['labeling', 'classification', 'text-generation']
+ )
+ instructions: str = Field(
+ title='Skill instructions',
+ description='Instructs agent what to do with the input data. '
+ 'Can use templating to refer to input fields.',
+ default='',
+ examples=['Label the input text with the following labels: {{labels}}']
+ )
+ description: Optional[str] = Field(
+ default='',
+ title='Skill description',
+ description='Description of the skill. Can be used to retrieve skill from the library.',
+ examples=['The skill to perform sentiment analysis on the input text.']
+ )
+ input_template: Optional[str] = Field(
+ title='Input template',
+ description='Template for the input data. '
+ 'Can use templating to refer to input parameters and perform data transformations.',
+ default="Input: {{{{{input}}}}}",
+ examples=["Text: {{{{{input}}}}}, Date: {{{{date_column}}}}, Sentiment: {{{{gen 'sentiment'}}}}"]
+ )
+ input_data_field: Optional[str] = Field(
+ title='Input data field',
+ description='Input data field name that will be used to match input data.',
+ examples=['text'],
+ # TODO: either make it required, or `input_template` required
+ default=None
+ )
+ output_template: Optional[str] = Field(
+ title='Output template',
+ description='Template for the output data. '
+ 'Can use templating to refer to input parameters and perform data transformations. '
+ 'Should contain at least one field matching `validation_fields`.',
+ default="Output: {{gen 'predictions'}}",
+ examples=["Output: {{select 'predictions' options=labels logprobs='score'}}"]
+ )
+ prediction_field: Optional[str] = Field(
+ title='Prediction field',
+ description='Prediction field name that will be used to match ground truth labels. '
+ 'Should match at least one output field in `output_template`, e.g. \'predictions\'',
+ examples=['predictions'],
+ default='predictions'
+ )
+
+ @model_validator(mode='after')
+ def validate_inputs(self):
+ """
+ Validates the input_template, updating it if necessary.
+
+ Returns:
+ BaseSkill: Updated instance of the BaseSkill class.
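+
+ Example (illustrative): with input_data_field="text", the default template
+ "Input: {{{{{input}}}}}" is rewritten to "Input: {{text}}".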
+ """
+ if '{{{{{input}}}}}' in self.input_template:
+ if self.input_data_field is None:
+ print_error(f'You provided skill "{self.name}" with input template:\n\n'
+ f'{self.__class__.__name__}.input_template = "{self.input_template}"\n\n'
+ 'that contains "{{{{{input}}}}}" placeholder. (yes... 5 curly braces!) \n\n'
+ 'In this case, you have to provide skill with `skill.input_data_field` to match the input data.'
+ f'\nFor example, if your input data stored in `"text"` column, '
+ f'you can set\n\nskill = {self.__class__.__name__}(..., input_data_field="text")')
+ raise ValueError(f'`input_data_field` is not provided for skill {self.name}')
+ self.input_template = self.input_template.format(input=self.input_data_field)
+ return self
+
+ def __call__(self, input: InternalDataFrame, runtime: Runtime, dataset: Dataset) -> InternalDataFrame:
+ """Calls the runtime to process a batch of inputs. Input and
+ output shapes can be varying, and it should also take care of
+ data types validation
+
+ Args:
+ input (InternalDataFrame): Input data in the form of an InternalDataFrame.
+ runtime (Runtime): The runtime instance to be used for processing.
+ dataset (Dataset): The dataset containing the data to be processed.
+
+ Returns:
+ InternalDataFrame: Concatenated dataframe with the original input and the predictions from the runtime.
+
+ """
+
+ # get user defined dataset input fields
+
+ runtime_predictions = runtime.process_batch(
+ batch=input,
+ input_template=self.input_template,
+ output_template=self.output_template,
+ instructions=self.instructions,
+ extra_fields=self._get_extra_fields()
+ )
+ return InternalDataFrameConcat((input, runtime_predictions), axis=1)
+
+ def _get_extra_fields(self):
+ """
+ Retrieves fields that are not categorized as system fields.
+
+ Returns:
+ dict: A dictionary containing fields that are not system fields.
+ """
+
+ # TODO: more robust way to exclude system fields
+ system_fields = {
+ 'name', 'description', 'input_template', 'output_template', 'instructions', 'validation_fields'}
+ extra_fields = self.model_dump(exclude=system_fields)
+ return extra_fields
+
+ @abstractmethod
+ def apply(
+ self, dataset: Dataset,
+ runtime: Runtime,
+ experience: ShortTermMemory
+ ) -> ShortTermMemory:
+ """
+ Applies the skill to a dataset and returns the results.
+
+ Args:
+ dataset (Dataset): The dataset on which the skill is to be applied.
+ runtime (Runtime): The runtime instance to be used for processing.
+ experience (ShortTermMemory): Previous experiences or results.
+
+ Returns:
+ ShortTermMemory: The updated experience after applying the skill.
+ """
+
+ @abstractmethod
+ def analyze(
+ self, experience: ShortTermMemory,
+ student_runtime: Runtime,
+ teacher_runtime: Optional[Runtime] = None,
+ memory: Optional[LongTermMemory] = None,
+ ) -> ShortTermMemory:
+ """
+ Analyzes the results to derive new experiences.
+
+ Args:
+ experience (ShortTermMemory): The current experience.
+ student_runtime (Runtime): The student runtime instance.
+ teacher_runtime (Runtime, optional): The teacher runtime instance. Defaults to None.
+ memory (LongTermMemory, optional): Previous long term memories. Defaults to None.
+
+ Returns:
+ ShortTermMemory: The updated experience after analysis.
+ """
+
+ @abstractmethod
+ def improve(
+ self,
+ experience: ShortTermMemory,
+ runtime: Runtime,
+ update_instructions: bool = True,
+ ) -> ShortTermMemory:
+ """
+ Refines the current state of the skill based on its experiences.
+
+ Args:
+ experience (ShortTermMemory): The current experience.
+ runtime (Runtime): The runtime instance to be used for processing.
+ update_instructions (bool, optional): Flag to decide if instructions should be updated. Defaults to True.
+
+ Returns:
+ ShortTermMemory: The updated experience after improvements.
+ """
+
+
+class LLMSkill(BaseSkill):
+ """
+ A skill specialized for Language Models (LLM). Inherits from the BaseSkill
+ class and provides specific implementations for handling LLM predictions based
+ on given instructions.
+ """
+
+ def apply(
+ self,
+ dataset: Dataset,
+ runtime: LLMRuntime,
+ experience: ShortTermMemory
+ ) -> ShortTermMemory:
+ """
+ Applies the LLM skill on a dataset and returns the results.
+
+ Args:
+ dataset (Dataset): The dataset on which the skill is to be applied.
+ runtime (LLMRuntime): The runtime instance to be used for processing.
+ experience (ShortTermMemory): Previous experiences or results.
+
+ Returns:
+ ShortTermMemory: The updated experience after applying the skill.
+ """
+
+ experience = experience.model_copy()
+
+ predictions = []
+
+ for batch in dataset.batch_iterator():
+ runtime_predictions = self(batch, runtime, dataset)
+ predictions.append(runtime_predictions)
+
+ if not predictions:
+ predictions = InternalDataFrame()
+ else:
+ predictions = InternalDataFrameConcat(predictions, copy=False)
+ predictions.rename(columns={self.prediction_field: self.name}, inplace=True)
+
+ # append predictions to existing experience, to chain skills
+ # TODO: implement predictions chaining
+ experience.predictions = predictions
+ # if experience.predictions is None:
+ # experience.predictions = predictions
+ # else:
+ # experience.predictions = InternalDataFrameConcat([
+ # experience.predictions.drop(columns=[col for col in experience.predictions.columns if col in predictions.columns]),
+ # predictions
+ # ], axis=1)
+ # raise NotImplementedError
+
+ return experience
+
+ def analyze(
+ self, experience: ShortTermMemory,
+ student_runtime: Runtime,
+ teacher_runtime: Optional[Runtime] = None,
+ memory: Optional[LongTermMemory] = None
+ ) -> ShortTermMemory:
+ """
+ Analyzes the results to identify any discrepancies and returns the observed experience.
+
+ Args:
+ experience (ShortTermMemory): The current experience.
+ student_runtime (Runtime): The student runtime instance.
+ teacher_runtime (Runtime, optional): The teacher runtime instance. Defaults to None.
+ memory (LongTermMemory, optional): Previous long term memories. Defaults to None.
+
+ Returns:
+ ShortTermMemory: The updated experience after analysis.
+ """
+
+ experience = experience.model_copy()
+
+ # TODO: can be multiple prediction validation fields
+ match = experience.match_column_name
+ errors = experience.evaluations[~experience.evaluations[match]]
+ experience.accuracy = experience.evaluations[match].mean()
+ if errors.empty:
+ # No errors - nothing to analyze
+ experience.errors = errors
+ return experience
+
+ # collect errors and create error report
+ # first, sample errors uniformly - more sophisticated sampling can be implemented later
+ errors = errors.sample(n=min(3, errors.shape[0]))
+
+ # collect error inputs from runtime
+ extra_fields = self._get_extra_fields()
+ inputs = student_runtime.process_batch_inputs(
+ batch=errors,
+ input_template=self.input_template,
+ extra_fields=extra_fields
+ )
+
+ # construct error report
+ errors = pd.concat([
+ inputs,
+ errors[[self.name, experience.ground_truth_column_name]]
+ ], axis=1)
+ errors.columns = ['input', 'prediction', 'ground_truth']
+ if not teacher_runtime:
+ teacher_runtime = student_runtime
+
+ error_reasons = teacher_runtime.process_batch(
+ errors,
+ instructions="{{#system~}}\n"
+ "LLM prompt was created by concatenating instructions with text input:\n\n"
+ "Prediction = LLM(Input, Instructions)\n\n"
+ "We expect the prediction to be equal to the ground truth.\n"
+ "Your task is to provide a reason for the error due to the original instruction.\n"
+ "Be concise and specific.\n\n"
+ f"Instructions: {self.instructions}\n"
+ "{{~/system}}",
+ input_template="{{#user~}}\n"
+ "{{input}}\n"
+ "Prediction: {{prediction}}\n"
+ "Ground truth: {{ground_truth}}\n"
+ "Explanation:\n"
+ "{{~/user}}",
+ output_template="{{#assistant~}}{{gen 'reason'}}{{~/assistant}}",
+ extra_fields=extra_fields
+ )
+ errors['reason'] = error_reasons['reason']
+
+ experience.errors = errors
+ return experience
+
+ def improve(
+ self,
+ experience: ShortTermMemory,
+ runtime: Runtime,
+ update_instructions: bool = True,
+ ) -> ShortTermMemory:
+ """
+ Refines the LLM skill based on its recent experiences.
+
+ Args:
+ experience (ShortTermMemory): The current experience.
+ runtime (Runtime): The runtime instance to be used for processing.
+ update_instructions (bool, optional): Flag to decide if instructions should be updated. Defaults to True.
+
+ Returns:
+ ShortTermMemory: The updated experience after improvements.
+ """
+
+ experience = experience.model_copy()
+
+ errors = experience.errors.to_dict(orient='records')
+ result = runtime.process_record(
+ record={
+ 'errors': errors
+ },
+ instructions="{{#system~}}\n"
+ "LLM prompt was created by concatenating instructions with text input:\n\n"
+ "Prediction = LLM(Input, Instructions)\n\n"
+ "We expect the prediction to be equal to the ground truth.\n"
+ "Your task is to craft a revised concise instruction for the LLM. "
+ "Follow best practices for LLM prompt engineering.\n"
+ "Include 2-3 examples at the end of your response to demonstrate how the new instruction would be applied.\n"
+ "Use the following format for your examples:\n"
+ "Input: ...\n"
+ "Output: ...\n\n"
+ "{{~/system}}\n",
+ input_template="{{#user~}}\n"
+ f"Old instruction: {self.instructions}\n\n"
+ "Errors:\n{{#each errors}}"
+ "\n{{this.input}}\n"
+ "Prediction: {{this.prediction}}\n"
+ "Ground truth: {{this.ground_truth}}\n"
+ "{{/each}}\n"
+ "New instruction:\n"
+ "{{~/user}}",
+ output_template="{{#assistant~}}{{gen 'new_instruction'}}{{~/assistant}}",
+ extra_fields=self._get_extra_fields()
+ )
+ new_instruction = result['new_instruction']
+
+ experience.initial_instructions = self.instructions
+ experience.updated_instructions = new_instruction
+
+ if update_instructions:
+ self.instructions = new_instruction
+
+ return experience
diff --git a/adala/skills/generation/__init__.py b/adala/skills/generation/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/adala/skills/generation/base.py b/adala/skills/generation/base.py
new file mode 100644
index 00000000..d943aff0
--- /dev/null
+++ b/adala/skills/generation/base.py
@@ -0,0 +1,15 @@
+from ..base import LLMSkill
+
+
+class TextGenerationSkill(LLMSkill):
+ """
+ Skill specialized for generating text based on the provided input.
+
+ This involves tasks where the LLM is expected to produce creative, coherent, and contextually
+ relevant textual content based on the given input.
+
+ Attributes:
+ instructions (str): Instruction to guide the LLM in text generation.
+ """
+
+ instructions: str = 'Generate text based on the provided input.'
diff --git a/adala/skills/generation/qa.py b/adala/skills/generation/qa.py
new file mode 100644
index 00000000..48a75cf1
--- /dev/null
+++ b/adala/skills/generation/qa.py
@@ -0,0 +1,23 @@
+from .base import TextGenerationSkill
+
+
+class QuestionAnsweringSkill(TextGenerationSkill):
+ """
+ Skill specialized for answering questions based on the provided input.
+
+ Inherits from the TextGenerationSkill and focuses on generating answers to the questions
+ posed in the input. The class customizes the instructions, input, and output templates
+ specifically for question-answering tasks.
+
+ Attributes:
+ instructions (str): Instruction to guide the LLM in answering the question.
+ input_template (str): Format in which the question is presented to the LLM.
+ output_template (str): Expected format of the LLM's answer.
+ prediction_field (str): Field name for the generated answer.
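+
+ Example (illustrative):
+
+ skill = QuestionAnsweringSkill(name='qa', input_data_field='question')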
+ """
+
+ instructions: str = 'Answer the question.'
+ input_template: str = "Question: {{{{{input}}}}}"
+ output_template: str = "Answer: {{gen 'answer'}}"
+ prediction_field: str = 'answer'
+
diff --git a/adala/skills/generation/summarization.py b/adala/skills/generation/summarization.py
new file mode 100644
index 00000000..a2c980ab
--- /dev/null
+++ b/adala/skills/generation/summarization.py
@@ -0,0 +1,22 @@
+from .base import TextGenerationSkill
+
+
+class SummarizationSkill(TextGenerationSkill):
+ """
+ Skill specialized for summarizing lengthy texts based on the provided input.
+
+ Inherits from the TextGenerationSkill and focuses on generating concise summaries
+ for the input texts. The class customizes the instructions, input, and output templates
+ specifically for text summarization tasks.
+
+ Attributes:
+ instructions (str): Instruction to guide the LLM in summarizing the text.
+ input_template (str): Format in which the full text is presented to the LLM.
+ output_template (str): Expected format of the LLM's summary.
+ prediction_field (str): Field name for the generated summary.
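+
+ Example (illustrative):
+
+ skill = SummarizationSkill(name='summarize', input_data_field='text')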
+ """
+
+ instructions: str = 'Summarize the text.'
+ input_template: str = "Text: {{{{{input}}}}}"
+ output_template: str = "Summary: {{gen 'summary'}}"
+ prediction_field: str = 'summary'
diff --git a/adala/skills/generation/translation.py b/adala/skills/generation/translation.py
new file mode 100644
index 00000000..319f7442
--- /dev/null
+++ b/adala/skills/generation/translation.py
@@ -0,0 +1,27 @@
+from .base import TextGenerationSkill
+
+
+class TranslationSkill(TextGenerationSkill):
+ """
+ Skill specialized for translating text from one language to another.
+
+ Inherits from the TextGenerationSkill and focuses on translating the input text to the
+ specified target language. The class customizes the instructions, input, and output templates
+ specifically for translation tasks.
+
+ Attributes:
+ instructions (str): Instruction to guide the LLM in translating the text.
+ input_template (str): Format in which the full text is presented to the LLM.
+ output_template (str): Expected format of the LLM's translation.
+ prediction_field (str): Field name for the generated translation.
+ target_language (str): Language to which the input text is translated.
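+
+ Example (illustrative):
+
+ skill = TranslationSkill(input_data_field='text', target_language='French')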
+ """
+
+ name: str = 'translation'
+ description: str = 'Translate text from one language to another.'
+ instructions: str = 'Identify the language of the given text and translate it to {{target_language}}.'
+ input_template: str = "Text: {{{{{input}}}}}"
+ # output_template: str = "Input language: {{gen 'detected_language'}}\nTranslation: {{gen 'translation'}}"
+ output_template: str = "Translation: {{gen 'translation'}}"
+ prediction_field: str = 'translation'
+ target_language: str = 'English'
diff --git a/adala/skills/labeling/__init__.py b/adala/skills/labeling/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/adala/skills/labeling/classification.py b/adala/skills/labeling/classification.py
new file mode 100644
index 00000000..c80204e1
--- /dev/null
+++ b/adala/skills/labeling/classification.py
@@ -0,0 +1,35 @@
+from ..base import LLMSkill
+from typing import List
+
+
+class ClassificationSkill(LLMSkill):
+ """
+ Skill specialized for classifying text inputs based on a predefined set of labels.
+
+ Args:
+ instructions (str): Templated instruction to guide the LLM in classification.
+ labels (List[str]): A list of valid labels for the classification task.
+ output_template (str): Templated string to format the output from the LLM.
+ prediction_field (str): Specifies the field in which predictions will be stored.
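+
+ Example (illustrative):
+
+ skill = ClassificationSkill(
+ name='sentiment',
+ labels=['Positive', 'Negative', 'Neutral'],
+ input_data_field='text'
+ )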
+ """
+
+ instructions: str = 'Label the input text with the following labels: {{labels}}'
+ labels: List[str]
+ output_template: str = "Output: {{select 'predictions' options=labels logprobs='score'}}"
+ prediction_field: str = 'predictions'
+
+
+class ClassificationSkillWithCoT(ClassificationSkill):
+ """
+ Skill specialized for classifying text inputs with the addition of generating a Chain of Thought.
+
+ Args:
+ instructions (str): Templated instruction to guide the LLM in classification and to generate a rationale.
+ labels (List[str]): A list of valid labels for the classification task.
+ input_template (str): Templated string to format the input, which includes a rationale (thoughts).
+ output_template (str): Templated string to format the output from the LLM.
+ prediction_field (str): Specifies the field in which predictions will be stored.
+ """
+
+ instructions: str = 'Label the input text with the following labels: {{labels}}. Provide a rationale for your answer.'
+ output_template: str = "Thoughts: {{gen 'rationale'}}\nOutput: {{select 'predictions' options=labels logprobs='score'}}"
diff --git a/adala/skills/labeling/sequence_labeling.py b/adala/skills/labeling/sequence_labeling.py
new file mode 100644
index 00000000..81611858
--- /dev/null
+++ b/adala/skills/labeling/sequence_labeling.py
@@ -0,0 +1,26 @@
+from ..base import LLMSkill
+from typing import List
+
+
+class SequenceLabelingSkill(LLMSkill):
+ """
+ Skill specialized for sequence labeling on text inputs based on a predefined set of labels.
+
+ This involves tasks like named entity recognition where each word/token in the sequence
+ might be assigned a label.
+
+ Args:
+ instructions (str): Templated instruction to guide the LLM in sequence labeling.
+ labels (List[str]): A list of valid labels for the sequence labeling task.
+ input_template (str): Templated string to format the input for the LLM.
+ output_template (str): Templated string to format the output from the LLM.
+ prediction_field (str): Specifies the field in which predictions will be stored.
+
+ Note:
+ This class is still a work in progress.
+ """
+ instructions: str = 'Label the input text with the following labels: {{labels}}'
+ labels: List[str]
+ input_template: str = "Input: {{{{{input}}}}}"
+ output_template: str = "Output: {{select 'predictions' options=labels logprobs='score'}}"
+ prediction_field: str = 'predictions'
diff --git a/adala/skills/skillset.py b/adala/skills/skillset.py
new file mode 100644
index 00000000..eff7cb47
--- /dev/null
+++ b/adala/skills/skillset.py
@@ -0,0 +1,137 @@
+from pydantic import BaseModel, model_validator
+from abc import ABC, abstractmethod
+from typing import List, Union, Dict, Any, Optional
+from adala.datasets.base import Dataset
+from adala.runtimes.base import Runtime
+from adala.memories.base import ShortTermMemory
+from .base import BaseSkill
+
+
+class SkillSet(BaseModel, ABC):
+ """
+ Represents a collection of interdependent skills aiming to achieve a specific goal.
+
+ A skill set breaks down the path to achieve a goal into necessary precursor skills.
+ Agents can evolve these skills either in parallel for tasks like self-consistency or
+ sequentially for complex problem decompositions and causal reasoning. In the most generic
+ cases, task decomposition can involve a graph-based approach.
+
+ Args:
+ skills (Dict[str, BaseSkill]): Dictionary mapping skill names to their corresponding
+ BaseSkill instances.
+ """
+
+ skills: Dict[str, BaseSkill]
+
+ @abstractmethod
+ def apply(self, dataset: Dataset, runtime: Runtime, experience: Optional[ShortTermMemory] = None) -> ShortTermMemory:
+ """
+ Apply the skill set to a dataset using a specified runtime.
+
+ Args:
+ dataset (Dataset): The dataset to apply the skill set to.
+ runtime (Runtime): The runtime environment in which to apply the skills.
+ experience (Optional[ShortTermMemory], optional): Existing experience data. Defaults to None.
+
+ Returns:
+ ShortTermMemory: Updated experience after applying the skill set.
+ """
+
+ @abstractmethod
+ def select_skill_to_improve(self, experience: ShortTermMemory) -> BaseSkill:
+ """
+ Select the next skill to enhance based on the current experience.
+
+ Args:
+ experience (ShortTermMemory): Current experience data.
+
+ Returns:
+ BaseSkill: Skill selected for improvement.
+ """
+
+
+class LinearSkillSet(SkillSet):
+ """
+ Represents a sequence of skills that are acquired in a specific order to achieve a goal.
+
+ LinearSkillSet ensures that skills are developed in a sequential manner, determined either
+ by the provided skill_sequence or by the lexicographical order of skill names.
+
+ Args:
+ skill_sequence (List[str], optional): Ordered list of skill names indicating the order
+ in which they should be acquired.
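+
+ Example:
+ An illustrative sketch; `summarize_skill` and `classify_skill` are assumed to be
+ previously constructed BaseSkill instances:
+
+ skill_set = LinearSkillSet(
+ skills={'summarize': summarize_skill, 'classify': classify_skill},
+ skill_sequence=['summarize', 'classify']
+ )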
+ """
+
+ skill_sequence: Optional[List[str]] = None
+
+ @model_validator(mode='after')
+ def skill_sequence_validator(self):
+ """
+ Validates and sets the default order for the skill sequence if not provided.
+
+ Returns:
+ LinearSkillSet: The current instance with updated skill_sequence attribute.
+ """
+
+ if self.skill_sequence is None:
+ # use default skill sequence defined by lexicographical order
+ self.skill_sequence = sorted(self.skills.keys())
+ return self
+
+ def apply(
+ self, dataset: Dataset,
+ runtime: Runtime,
+ experience: Optional[ShortTermMemory] = None
+ ) -> ShortTermMemory:
+ """
+ Sequentially applies each skill on the dataset, enhancing the agent's experience.
+
+ Args:
+ dataset (Dataset): The dataset to apply the skills on.
+ runtime (Runtime): The runtime environment in which to apply the skills.
+ experience (Optional[ShortTermMemory], optional): Existing experience data. Defaults to None.
+
+ Returns:
+ ShortTermMemory: Updated experience after sequentially applying the skills.
+ """
+ if experience is None:
+ experience = ShortTermMemory(dataset=dataset)
+ else:
+ experience = experience.model_copy()
+
+ for skill_name in self.skill_sequence:
+ skill = self.skills[skill_name]
+ experience = skill.apply(dataset, runtime, experience)
+
+ return experience
+
+ def select_skill_to_improve(self, experience: ShortTermMemory) -> BaseSkill:
+ """
+ Picks the next skill for improvement in the sequence.
+
+ Args:
+ experience (ShortTermMemory): Current experience data.
+
+ Returns:
+ BaseSkill: The next skill selected for improvement.
+ """
+
+ # TODO: implement real logic for skill selection
+ return self.skills[self.skill_sequence[0]]
+
+ def __rich__(self):
+ """Returns a rich representation of the skill."""
+ # TODO: move it to a base class and use repr derived from Skills
+ text = f"[bold blue]Total Agent Skills: {len(self.skills)}[/bold blue]\n\n"
+ for skill in self.skills.values():
+ text += f'[bold underline green]{skill.name}[/bold underline green]\n' \
+ f'[green]{skill.instructions}[/green]\n'
+ return text
+
+
+class ParallelSkillSet(SkillSet):
+ """
+ Represents a set of skills that are acquired simultaneously to reach a goal.
+ """
+
+ pass
diff --git a/adala/utils/__init__.py b/adala/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/adala/utils/internal_data.py b/adala/utils/internal_data.py
new file mode 100644
index 00000000..8c8f6699
--- /dev/null
+++ b/adala/utils/internal_data.py
@@ -0,0 +1,16 @@
+import pandas as pd
+from typing import List, Dict, Any, Union, Iterable
+
+RawRecord = Dict[str, Any]
+RawRecords = List[RawRecord]
+
+# Internal data tables representation. Replace this with Dask or Polars in the future.
+InternalDataFrame = pd.DataFrame
+
+
+def InternalDataFrame_encoder(df: InternalDataFrame) -> RawRecords:
+ """Encode an InternalDataFrame as a list of record dictionaries."""
+ return df.to_dict(orient='records')
+
+
+def InternalDataFrameConcat(dfs: Iterable[InternalDataFrame], **kwargs) -> InternalDataFrame:
+ """Concatenate InternalDataFrames; keyword arguments are forwarded to pandas.concat."""
+ return pd.concat(dfs, **kwargs)
diff --git a/adala/utils/logs.py b/adala/utils/logs.py
new file mode 100644
index 00000000..d9cca2a3
--- /dev/null
+++ b/adala/utils/logs.py
@@ -0,0 +1,48 @@
+import pandas as pd
+import time
+
+from rich import print
+from rich.table import Table
+from rich import box
+from rich.console import Console
+from typing import Optional
+from .internal_data import InternalDataFrame
+
+console = Console()
+error_console = Console(stderr=True, style="bold red")
+
+
+def print_text(text: str, style=None, streaming_style=False):
+ if streaming_style:
+ for char in text:
+ console.print(char, sep='', end='', style=style)
+ time.sleep(0.01)
+ console.print()
+ else:
+ console.print(text, style=style)
+
+
+def print_error(text: str):
+ error_console.print(text)
+
+
+def print_dataframe(dataframe: InternalDataFrame):
+ num_rows = 5
+ table = Table(show_header=True, header_style="bold magenta")
+ # index_name = dataframe.index.name or 'index'
+ # table.add_column(index_name)
+
+ for column in dataframe.columns:
+ table.add_column(str(column))
+
+ for index, value_list in enumerate(dataframe.iloc[:num_rows].values.tolist()):
+ # row = [str(index)]
+ row = []
+ row += [str(x) for x in value_list]
+ table.add_row(*row)
+
+ # Update the style of the table
+ table.row_styles = ["none", "dim"]
+ table.box = box.SIMPLE_HEAD
+
+ console.print(table)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..43dd0a3c
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,24 @@
+# Adala documentation
+
+To develop the documentation, you need to install [MkDocs](https://www.mkdocs.org/).
+
+```
+pip install -r requirements.txt
+```
+
+For full documentation visit [mkdocs.org](https://www.mkdocs.org).
+
+## Commands
+
+* `mkdocs new [dir-name]` - Create a new project.
+* `mkdocs serve` - Start the live-reloading docs server.
+* `mkdocs build` - Build the documentation site.
+* `mkdocs -h` - Print help message and exit.
+
+## Project layout
+
+ mkdocs.yml # The configuration file.
+ docs/
+ index.md # The documentation homepage.
+ ... # Other markdown pages, images and other files.
+
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
new file mode 100644
index 00000000..472e8f82
--- /dev/null
+++ b/docs/mkdocs.yml
@@ -0,0 +1,46 @@
+site_name: Adala Docs
+
+logo: src/img/logo.png
+favicon: src/img/logo.png
+
+repo_url: https://github.com/humansignal/Adala
+edit_uri: https://github.com/humansignal/Adala/tree/main/docs/src
+
+repo_name: humansignal/Adala
+docs_dir: src
+
+plugins:
+ - search
+ - autorefs
+ - mkdocs-jupyter
+ - mkdocstrings
+
+
+theme:
+ name: "material"
+ features:
+ - content.code.copy
+ - content.tabs.link
+ - content.action.edit
+ - toc.follow
+ - toc.integrate
+ - navigation.top
+ - navigation.tabs
+ - navigation.tabs.sticky
+ - navigation.footer
+ - navigation.tracking
+ - navigation.instant
+ - navigation.indexes
+ - navigation.expand
+ - navigation.sections
+
+
+nav:
+ - Home: 'index.md'
+ - agents.md
+ - datasets.md
+ - environments.md
+ - memories.md
+ - runtimes.md
+ - skills.md
+ # - utils.md
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..2f00bce9
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,4 @@
+mkdocs
+mkdocs-jupyter
+mkdocs-material
+mkdocstrings[python]
diff --git a/docs/src/agents.md b/docs/src/agents.md
new file mode 100644
index 00000000..5b6e9da9
--- /dev/null
+++ b/docs/src/agents.md
@@ -0,0 +1,2 @@
+
+::: adala.agents.base
diff --git a/docs/src/datasets.md b/docs/src/datasets.md
new file mode 100644
index 00000000..a96ed85a
--- /dev/null
+++ b/docs/src/datasets.md
@@ -0,0 +1,4 @@
+
+::: adala.datasets.base
+
+::: adala.datasets.dataframe
diff --git a/docs/src/environments.md b/docs/src/environments.md
new file mode 100644
index 00000000..27c5e608
--- /dev/null
+++ b/docs/src/environments.md
@@ -0,0 +1,2 @@
+
+::: adala.environments.base
diff --git a/docs/src/img/diagram.png b/docs/src/img/diagram.png
new file mode 100644
index 00000000..e430937e
Binary files /dev/null and b/docs/src/img/diagram.png differ
diff --git a/docs/src/img/logo.png b/docs/src/img/logo.png
new file mode 100644
index 00000000..b37a89a8
Binary files /dev/null and b/docs/src/img/logo.png differ
diff --git a/docs/src/index.md b/docs/src/index.md
new file mode 100644
index 00000000..90a68d3c
--- /dev/null
+++ b/docs/src/index.md
@@ -0,0 +1,109 @@
+# Quickstart
+
+Adala is an **A**utonomous **DA**ta (**L**abeling) **A**gent framework.
+
+Adala offers a robust framework for implementing agents specialized in data processing, with an emphasis on
+diverse data labeling tasks. These agents are autonomous, meaning they can independently acquire one or more skills
+through iterative learning. This learning process is influenced by their operating environment, observations, and
+reflections. Users define the environment by providing a ground truth dataset. Every agent learns and applies its skills
+in what we refer to as a "runtime", which in practice is backed by an LLM.
+
+![Diagram of components](img/diagram.png)
+
+
+## Installation
+
+Install Adala:
+
+```sh
+pip install adala
+```
+
+## Prerequisites
+
+Set OPENAI_API_KEY ([see instructions here](https://platform.openai.com/docs/quickstart/step-2-setup-your-api-key))
+
+
+## Quickstart
+
+In this example, we will use Adala as a standalone library directly inside a Python notebook.
+
+Click [here](https://github.com/HumanSignal/Adala/blob/master/examples/quickstart.ipynb) to see an extended quickstart example.
+
+```python
+import pandas as pd
+
+from adala.agents import Agent
+from adala.datasets import DataFrameDataset
+from adala.environments import BasicEnvironment
+from adala.skills import ClassificationSkill
+from adala.runtimes import OpenAIRuntime
+from rich import print
+
+# Train dataset
+ground_truth_df = pd.DataFrame([
+ ["It was the negative first impressions, and then it started working.", "Positive"],
+ ["Not loud enough and doesn't turn on like it should.", "Negative"],
+ ["I don't know what to say.", "Neutral"],
+ ["Manager was rude, but the most important that mic shows very flat frequency response.", "Positive"],
+ ["The phone doesn't seem to accept anything except CBR mp3s.", "Negative"],
+ ["I tried it before, I bought this device for my son.", "Neutral"],
+], columns=["text", "ground_truth"])
+
+# Test dataset
+predict_df = pd.DataFrame([
+ "All three broke within two months of use.",
+ "The device worked for a long time, can't say anything bad.",
+ "Just a random line of text."
+], columns=["text"])
+
+ground_truth_dataset = DataFrameDataset(df=ground_truth_df)
+predict_dataset = DataFrameDataset(df=predict_df)
+
+agent = Agent(
+ # connect to a dataset
+ environment=BasicEnvironment(
+ ground_truth_dataset=ground_truth_dataset,
+ ground_truth_column="ground_truth"
+ ),
+
+ # define a skill
+ skills=ClassificationSkill(
+ name='sentiment_classification',
+ instructions="Label text as subjective or objective.",
+ labels=["Positive", "Negative", "Neutral"],
+ input_data_field='text'
+ ),
+
+ # define all the different runtimes your skills may use
+ runtimes = {
+ # You can specify your OPENAI API KEY here via `OpenAIRuntime(..., api_key='your-api-key')`
+ 'openai': OpenAIRuntime(model='gpt-3.5-turbo-instruct'),
+ 'openai-gpt3': OpenAIRuntime(model='gpt-3.5-turbo'),
+ # 'openai-gpt4': OpenAIRuntime(model='gpt-4'),
+ },
+ default_runtime='openai',
+
+ # NOTE! If you don't have access to GPT-4, replace it with "openai-gpt3"
+ # default_teacher_runtime='openai-gpt4'
+)
+
+print(agent)
+print(agent.skills)
+
+agent.learn(learning_iterations=3, accuracy_threshold=0.95)
+
+print('\n=> Run tests ...')
+run = agent.apply_skills(predict_dataset)
+print('\n => Test results:')
+print(run)
+```
+
+## Reference
+
+- [**Agents**](agents.md) - the main interface for interacting with the environment
+- [**Datasets**](datasets.md) - data inputs for agents
+- [**Environments**](environments.md) - environments where agents collect the ground truth signal
+- [**Memories**](memories.md) - the agent's memory for storing and retrieving data
+- [**Runtimes**](runtimes.md) - the agent's execution runtimes (e.g. LLM providers)
+- [**Skills**](skills.md) - agent skills for data labeling
diff --git a/docs/src/memories.md b/docs/src/memories.md
new file mode 100644
index 00000000..cda68d25
--- /dev/null
+++ b/docs/src/memories.md
@@ -0,0 +1,4 @@
+
+::: adala.memories.base
+
+::: adala.memories.file_memory
diff --git a/docs/src/runtimes.md b/docs/src/runtimes.md
new file mode 100644
index 00000000..098f44fc
--- /dev/null
+++ b/docs/src/runtimes.md
@@ -0,0 +1,4 @@
+
+::: adala.runtimes.base
+
+::: adala.runtimes.openai
diff --git a/docs/src/skills.md b/docs/src/skills.md
new file mode 100644
index 00000000..3b770dce
--- /dev/null
+++ b/docs/src/skills.md
@@ -0,0 +1,14 @@
+
+::: adala.skills.base
+
+::: adala.skills.skillset
+
+::: adala.skills.generation.base
+
+::: adala.skills.generation.qa
+
+::: adala.skills.generation.summarization
+
+::: adala.skills.labeling.classification
+
+::: adala.skills.labeling.sequence_labeling
diff --git a/docs/src/utils.md b/docs/src/utils.md
new file mode 100644
index 00000000..6db4d6e2
--- /dev/null
+++ b/docs/src/utils.md
@@ -0,0 +1,2 @@
+
+::: adala.utils.internal_data
diff --git a/examples/classification_skill.ipynb b/examples/classification_skill.ipynb
new file mode 100644
index 00000000..f5acd9cb
--- /dev/null
+++ b/examples/classification_skill.ipynb
@@ -0,0 +1,907 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "94ad15ac",
+ "metadata": {},
+ "source": [
+ "# Classification skill"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "a2f6d99b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " category | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " Apple product with a sleek design. | \n",
+ " Electronics | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " Laptop stand for the kitchen. | \n",
+ " Furniture/Home Decor | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " Chocolate leather boots. | \n",
+ " Footwear/Clothing | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " Wooden cream for surfaces. | \n",
+ " Furniture/Home Decor | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " Natural finish for your lips. | \n",
+ " Beauty/Personal Care | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " text category\n",
+ "0 Apple product with a sleek design. Electronics\n",
+ "1 Laptop stand for the kitchen. Furniture/Home Decor\n",
+ "2 Chocolate leather boots. Footwear/Clothing\n",
+ "3 Wooden cream for surfaces. Furniture/Home Decor\n",
+ "4 Natural finish for your lips. Beauty/Personal Care"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "df = pd.DataFrame([\n",
+ " {\"text\": \"Apple product with a sleek design.\", \"category\": \"Electronics\"},\n",
+ " {\"text\": \"Laptop stand for the kitchen.\", \"category\": \"Furniture/Home Decor\"},\n",
+ " {\"text\": \"Chocolate leather boots.\", \"category\": \"Footwear/Clothing\"},\n",
+ " {\"text\": \"Wooden cream for surfaces.\", \"category\": \"Furniture/Home Decor\"},\n",
+ " {\"text\": \"Natural finish for your lips.\", \"category\": \"Beauty/Personal Care\"}\n",
+ "])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "6ee2cebf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:00<00:00, 45.32it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "=> Iteration #0: Comparing to ground truth, analyzing and improving ...\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "\n",
+ "\n",
+ "=> Iteration #\u001b[1;36m0\u001b[0m: Comparing to ground truth, analyzing and improving \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Comparing predictions to ground truth data ...\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "Comparing predictions to ground truth data \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ " \n",
+ " text category product_category_clโฆ score category__x__produโฆ \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " Apple product with a Electronics Electronics {'Footwear/Clothingโฆ True \n",
+ " sleek design. -7.4104013, \n",
+ " 'Electronics': \n",
+ " -0.0006187928400000โฆ \n",
+ " 'Food/Beverages': \n",
+ " -12.842141, \n",
+ " 'Furniture/Home \n",
+ " Decor': -13.004693, \n",
+ " 'Beauty/Personal \n",
+ " Care': -11.657418} \n",
+ " Laptop stand for the Furniture/Home Decor Electronics {'Footwear/Clothingโฆ False \n",
+ " kitchen. -5.046604, \n",
+ " 'Electronics': \n",
+ " -0.0079155900000000โฆ \n",
+ " 'Food/Beverages': \n",
+ " -12.865566, \n",
+ " 'Furniture/Home \n",
+ " Decor': -6.5376244, \n",
+ " 'Beauty/Personal \n",
+ " Care': -12.865139} \n",
+ " Chocolate leather Footwear/Clothing Footwear/Clothing {'Footwear/Clothingโฆ True \n",
+ " boots. -0.0410600639999999โฆ \n",
+ " 'Electronics': \n",
+ " -11.113914, \n",
+ " 'Food/Beverages': \n",
+ " -3.3290619999999995, \n",
+ " 'Furniture/Home \n",
+ " Decor': -5.6644883, \n",
+ " 'Beauty/Personal \n",
+ " Care': -6.991324} \n",
+ " Wooden cream for Furniture/Home Decor Furniture/Home Decor {'Footwear/Clothingโฆ True \n",
+ " surfaces. -5.391147, \n",
+ " 'Electronics': \n",
+ " -12.019984, \n",
+ " 'Food/Beverages': \n",
+ " -8.380702, \n",
+ " 'Furniture/Home \n",
+ " Decor': \n",
+ " -0.0170855020000000โฆ \n",
+ " 'Beauty/Personal \n",
+ " Care': -4.4105563} \n",
+ " Natural finish for Beauty/Personal Care Beauty/Personal Care {'Footwear/Clothingโฆ True \n",
+ " your lips. -3.9375494, \n",
+ " 'Electronics': \n",
+ " -10.5326805, \n",
+ " 'Food/Beverages': \n",
+ " -9.545025, \n",
+ " 'Furniture/Home \n",
+ " Decor': -10.485508, \n",
+ " 'Beauty/Personal \n",
+ " Care': \n",
+ " -0.0198170879999999โฆ \n",
+ " \n",
+ "
\n"
+ ],
+ "text/plain": [
+ " \n",
+ " \u001b[1;35m \u001b[0m\u001b[1;35mtext \u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mcategory \u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mproduct_category_clโฆ\u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mscore \u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mcategory__x__produโฆ\u001b[0m\u001b[1;35m \u001b[0m \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " Apple product with a Electronics Electronics {'Footwear/Clothingโฆ True \n",
+ " sleek design. -7.4104013, \n",
+ " 'Electronics': \n",
+ " -0.0006187928400000โฆ \n",
+ " 'Food/Beverages': \n",
+ " -12.842141, \n",
+ " 'Furniture/Home \n",
+ " Decor': -13.004693, \n",
+ " 'Beauty/Personal \n",
+ " Care': -11.657418} \n",
+ " \u001b[2m \u001b[0m\u001b[2mLaptop stand for the\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFurniture/Home Decor\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mElectronics \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m{'Footwear/Clothingโฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFalse \u001b[0m\u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m\u001b[2mkitchen. \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-5.046604, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Electronics': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-0.0079155900000000โฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Food/Beverages': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-12.865566, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Furniture/Home \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mDecor': -6.5376244, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Beauty/Personal \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mCare': -12.865139} \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " Chocolate leather Footwear/Clothing Footwear/Clothing {'Footwear/Clothingโฆ True \n",
+ " boots. -0.0410600639999999โฆ \n",
+ " 'Electronics': \n",
+ " -11.113914, \n",
+ " 'Food/Beverages': \n",
+ " -3.3290619999999995, \n",
+ " 'Furniture/Home \n",
+ " Decor': -5.6644883, \n",
+ " 'Beauty/Personal \n",
+ " Care': -6.991324} \n",
+ " \u001b[2m \u001b[0m\u001b[2mWooden cream for \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFurniture/Home Decor\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFurniture/Home Decor\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m{'Footwear/Clothingโฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mTrue \u001b[0m\u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m\u001b[2msurfaces. \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-5.391147, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Electronics': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-12.019984, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Food/Beverages': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-8.380702, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Furniture/Home \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mDecor': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-0.0170855020000000โฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Beauty/Personal \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mCare': -4.4105563} \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " Natural finish for Beauty/Personal Care Beauty/Personal Care {'Footwear/Clothingโฆ True \n",
+ " your lips. -3.9375494, \n",
+ " 'Electronics': \n",
+ " -10.5326805, \n",
+ " 'Food/Beverages': \n",
+ " -9.545025, \n",
+ " 'Furniture/Home \n",
+ " Decor': -10.485508, \n",
+ " 'Beauty/Personal \n",
+ " Care': \n",
+ " -0.0198170879999999โฆ \n",
+ " \n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Analyze evaluation experience ...\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "Analyze evaluation experience \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโ| 1/1 [00:00<00:00, 137.21it/s]\n",
+ "100%|โโโโโโโโโโโโโโโโโ| 1/1 [00:04<00:00, 4.85s/it]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "Number of errors: 1\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "Number of errors: \u001b[1;36m1\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Accuracy = 80.00%\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "\u001b[1;31mAccuracy = \u001b[0m\u001b[1;36m80.00\u001b[0m\u001b[1;31m%\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Improve \"product_category_classification\" skill based on analysis ...\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "Improve \u001b[32m\"product_category_classification\"\u001b[0m skill based on analysis \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Updated instructions for skill \"product_category_classification\":\n",
+ "\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "Updated instructions for skill \u001b[32m\"product_category_classification\"\u001b[0m:\n",
+ "\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Categorize the input text into one of the following labels: ['Footwear/Clothing', 'Electronics', 'Food/Beverages', \n",
+ "'Furniture/Home Decor', 'Beauty/Personal Care']. Choose the label that best represents the main category of the \n",
+ "input text.\n",
+ "\n",
+ "Examples:\n",
+ "\n",
+ "Input: Laptop stand for the kitchen.\n",
+ "Output: Furniture/Home Decor\n",
+ "\n",
+ "Input: Running shoes for men.\n",
+ "Output: Footwear/Clothing\n",
+ "\n",
+ "Input: Organic shampoo for dry hair.\n",
+ "Output: Beauty/Personal Care\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001b[1;32mCategorize the input text into one of the following labels: \u001b[0m\u001b[1;32m[\u001b[0m\u001b[32m'Footwear/Clothing'\u001b[0m\u001b[1;32m, \u001b[0m\u001b[32m'Electronics'\u001b[0m\u001b[1;32m, \u001b[0m\u001b[32m'Food/Beverages'\u001b[0m\u001b[1;32m, \u001b[0m\n",
+ "\u001b[32m'Furniture/Home Decor'\u001b[0m\u001b[1;32m, \u001b[0m\u001b[32m'Beauty/Personal Care'\u001b[0m\u001b[1;32m]\u001b[0m\u001b[1;32m. Choose the label that best represents the main category of the \u001b[0m\n",
+ "\u001b[1;32minput text.\u001b[0m\n",
+ "\n",
+ "\u001b[1;32mExamples:\u001b[0m\n",
+ "\n",
+ "\u001b[1;32mInput: Laptop stand for the kitchen.\u001b[0m\n",
+ "\u001b[1;32mOutput: Furniture/Home Decor\u001b[0m\n",
+ "\n",
+ "\u001b[1;32mInput: Running shoes for men.\u001b[0m\n",
+ "\u001b[1;32mOutput: Footwear/Clothing\u001b[0m\n",
+ "\n",
+ "\u001b[1;32mInput: Organic shampoo for dry hair.\u001b[0m\n",
+ "\u001b[1;32mOutput: Beauty/Personal Care\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Re-apply product_category_classification skill to dataset ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Re-apply product_category_classification skill to dataset \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:03<00:00, 1.48it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "=> Iteration #1: Comparing to ground truth, analyzing and improving ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\n",
+ "\n",
+ "=> Iteration #\u001b[1;36m1\u001b[0m: Comparing to ground truth, analyzing and improving \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Comparing predictions to ground truth data ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Comparing predictions to ground truth data \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ " \n",
+ " text category product_category_clโฆ score category__x__produโฆ \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " Apple product with a Electronics Electronics {'Footwear/Clothingโฆ True \n",
+ " sleek design. -13.669698, \n",
+ " 'Electronics': \n",
+ " -4.4849444000336985โฆ \n",
+ " 'Food/Beverages': \n",
+ " -14.937825, \n",
+ " 'Furniture/Home \n",
+ " Decor': -13.595754, \n",
+ " 'Beauty/Personal \n",
+ " Care': -13.327497} \n",
+ " Laptop stand for the Furniture/Home Decor Furniture/Home Decor {'Footwear/Clothingโฆ True \n",
+ " kitchen. -9.9471035, \n",
+ " 'Electronics': \n",
+ " -4.787397, \n",
+ " 'Food/Beverages': \n",
+ " -12.115164, \n",
+ " 'Furniture/Home \n",
+ " Decor': \n",
+ " -0.0084281690000000โฆ \n",
+ " 'Beauty/Personal \n",
+ " Care': -12.145201} \n",
+ " Chocolate leather Footwear/Clothing Footwear/Clothing {'Footwear/Clothingโฆ True \n",
+ " boots. -0.0003247375000000โฆ \n",
+ " 'Electronics': \n",
+ " -17.322811, \n",
+ " 'Food/Beverages': \n",
+ " -8.062444, \n",
+ " 'Furniture/Home \n",
+ " Decor': -12.040547, \n",
+ " 'Beauty/Personal \n",
+ " Care': -12.584134} \n",
+ " Wooden cream for Furniture/Home Decor Furniture/Home Decor {'Footwear/Clothingโฆ True \n",
+ " surfaces. -15.480099, \n",
+ " 'Electronics': \n",
+ " -17.015057, \n",
+ " 'Food/Beverages': \n",
+ " -13.499149, \n",
+ " 'Furniture/Home \n",
+ " Decor': \n",
+ " -0.0001718358800000โฆ \n",
+ " 'Beauty/Personal \n",
+ " Care': -8.679317} \n",
+ " Natural finish for Beauty/Personal Care Beauty/Personal Care {'Footwear/Clothingโฆ True \n",
+ " your lips. -11.842119, \n",
+ " 'Electronics': \n",
+ " -14.539164, \n",
+ " 'Food/Beverages': \n",
+ " -13.285265, \n",
+ " 'Furniture/Home \n",
+ " Decor': -14.923815, \n",
+ " 'Beauty/Personal \n",
+ " Care': \n",
+ " -9.72990600003512e-โฆ \n",
+ " \n",
+    "\n"
+ ],
+ "text/plain": [
+ " \n",
+ " \u001b[1;35m \u001b[0m\u001b[1;35mtext \u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mcategory \u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mproduct_category_clโฆ\u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mscore \u001b[0m\u001b[1;35m \u001b[0m \u001b[1;35m \u001b[0m\u001b[1;35mcategory__x__produโฆ\u001b[0m\u001b[1;35m \u001b[0m \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " Apple product with a Electronics Electronics {'Footwear/Clothingโฆ True \n",
+ " sleek design. -13.669698, \n",
+ " 'Electronics': \n",
+ " -4.4849444000336985โฆ \n",
+ " 'Food/Beverages': \n",
+ " -14.937825, \n",
+ " 'Furniture/Home \n",
+ " Decor': -13.595754, \n",
+ " 'Beauty/Personal \n",
+ " Care': -13.327497} \n",
+ " \u001b[2m \u001b[0m\u001b[2mLaptop stand for the\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFurniture/Home Decor\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFurniture/Home Decor\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m{'Footwear/Clothingโฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mTrue \u001b[0m\u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m\u001b[2mkitchen. \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-9.9471035, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Electronics': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-4.787397, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Food/Beverages': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-12.115164, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Furniture/Home \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mDecor': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-0.0084281690000000โฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Beauty/Personal \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mCare': -12.145201} \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " Chocolate leather Footwear/Clothing Footwear/Clothing {'Footwear/Clothingโฆ True \n",
+ " boots. -0.0003247375000000โฆ \n",
+ " 'Electronics': \n",
+ " -17.322811, \n",
+ " 'Food/Beverages': \n",
+ " -8.062444, \n",
+ " 'Furniture/Home \n",
+ " Decor': -12.040547, \n",
+ " 'Beauty/Personal \n",
+ " Care': -12.584134} \n",
+ " \u001b[2m \u001b[0m\u001b[2mWooden cream for \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFurniture/Home Decor\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mFurniture/Home Decor\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m{'Footwear/Clothingโฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mTrue \u001b[0m\u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m\u001b[2msurfaces. \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-15.480099, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Electronics': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-17.015057, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Food/Beverages': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-13.499149, \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Furniture/Home \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mDecor': \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m-0.0001718358800000โฆ\u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2m'Beauty/Personal \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m \u001b[2m \u001b[0m\u001b[2mCare': -8.679317} \u001b[0m\u001b[2m \u001b[0m \u001b[2m \u001b[0m \n",
+ " Natural finish for Beauty/Personal Care Beauty/Personal Care {'Footwear/Clothingโฆ True \n",
+ " your lips. -11.842119, \n",
+ " 'Electronics': \n",
+ " -14.539164, \n",
+ " 'Food/Beverages': \n",
+ " -13.285265, \n",
+ " 'Furniture/Home \n",
+ " Decor': -14.923815, \n",
+ " 'Beauty/Personal \n",
+ " Care': \n",
+ " -9.72990600003512e-โฆ \n",
+ " \n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Analyze evaluation experience ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Analyze evaluation experience \u001b[33m...\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Number of errors: 0\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Number of errors: \u001b[1;36m0\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Accuracy = 100.00%\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001b[1;31mAccuracy = \u001b[0m\u001b[1;36m100.00\u001b[0m\u001b[1;31m%\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Accuracy threshold reached (1.0 >= 0.9)\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Accuracy threshold reached \u001b[1m(\u001b[0m\u001b[1;36m1.0\u001b[0m >= \u001b[1;36m0.9\u001b[0m\u001b[1m)\u001b[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Train is done!\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Train is done!\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "ShortTermMemory(dataset=DataFrameDataset(df= text category\n",
+ "0 Apple product with a sleek design. Electronics\n",
+ "1 Laptop stand for the kitchen. Furniture/Home Decor\n",
+ "2 Chocolate leather boots. Footwear/Clothing\n",
+ "3 Wooden cream for surfaces. Furniture/Home Decor\n",
+ "4 Natural finish for your lips. Beauty/Personal Care), predictions= text category \\\n",
+ "0 Apple product with a sleek design. Electronics \n",
+ "1 Laptop stand for the kitchen. Furniture/Home Decor \n",
+ "2 Chocolate leather boots. Footwear/Clothing \n",
+ "3 Wooden cream for surfaces. Furniture/Home Decor \n",
+ "4 Natural finish for your lips. Beauty/Personal Care \n",
+ "\n",
+ " product_category_classification \\\n",
+ "0 Electronics \n",
+ "1 Furniture/Home Decor \n",
+ "2 Footwear/Clothing \n",
+ "3 Furniture/Home Decor \n",
+ "4 Beauty/Personal Care \n",
+ "\n",
+ " score \n",
+ "0 {'Footwear/Clothing': -13.669698, 'Electronics... \n",
+ "1 {'Footwear/Clothing': -9.9471035, 'Electronics... \n",
+ "2 {'Footwear/Clothing': -0.0003247375000000436, ... \n",
+ "3 {'Footwear/Clothing': -15.480099, 'Electronics... \n",
+ "4 {'Footwear/Clothing': -11.842119, 'Electronics... , evaluations= text category \\\n",
+ "0 Apple product with a sleek design. Electronics \n",
+ "1 Laptop stand for the kitchen. Furniture/Home Decor \n",
+ "2 Chocolate leather boots. Footwear/Clothing \n",
+ "3 Wooden cream for surfaces. Furniture/Home Decor \n",
+ "4 Natural finish for your lips. Beauty/Personal Care \n",
+ "\n",
+ " product_category_classification \\\n",
+ "0 Electronics \n",
+ "1 Furniture/Home Decor \n",
+ "2 Footwear/Clothing \n",
+ "3 Furniture/Home Decor \n",
+ "4 Beauty/Personal Care \n",
+ "\n",
+ " score \\\n",
+ "0 {'Footwear/Clothing': -13.669698, 'Electronics... \n",
+ "1 {'Footwear/Clothing': -9.9471035, 'Electronics... \n",
+ "2 {'Footwear/Clothing': -0.0003247375000000436, ... \n",
+ "3 {'Footwear/Clothing': -15.480099, 'Electronics... \n",
+ "4 {'Footwear/Clothing': -11.842119, 'Electronics... \n",
+ "\n",
+ " category__x__product_category_classification \n",
+ "0 True \n",
+ "1 True \n",
+ "2 True \n",
+ "3 True \n",
+ "4 True , ground_truth_column_name='category', match_column_name='category__x__product_category_classification', errors=Empty DataFrame\n",
+ "Columns: [text, category, product_category_classification, score, category__x__product_category_classification]\n",
+ "Index: [], accuracy=1.0, initial_instructions='Label the input text with the following labels: {{labels}}', updated_instructions=\"Categorize the input text into one of the following labels: ['Footwear/Clothing', 'Electronics', 'Food/Beverages', 'Furniture/Home Decor', 'Beauty/Personal Care']. Choose the label that best represents the main category of the input text.\\n\\nExamples:\\n\\nInput: Laptop stand for the kitchen.\\nOutput: Furniture/Home Decor\\n\\nInput: Running shoes for men.\\nOutput: Footwear/Clothing\\n\\nInput: Organic shampoo for dry hair.\\nOutput: Beauty/Personal Care\")"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from adala.agents import Agent\n",
+ "from adala.environments import BasicEnvironment\n",
+ "from adala.skills.labeling.classification import ClassificationSkill\n",
+ "\n",
+ "agent = Agent(\n",
+ " skills=ClassificationSkill(\n",
+ " name='product_category_classification',\n",
+ " input_data_field='text',\n",
+ " labels=[\n",
+ " \"Footwear/Clothing\",\n",
+ " \"Electronics\",\n",
+ " \"Food/Beverages\",\n",
+ " \"Furniture/Home Decor\",\n",
+ " \"Beauty/Personal Care\"\n",
+ " ],\n",
+ " ),\n",
+ " environment=BasicEnvironment(\n",
+ " ground_truth_dataset=df,\n",
+ " ground_truth_column='category'\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "agent.learn()"
+ ]
+ },
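+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "inspect-learn-experience-sketch",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hedged follow-up sketch (optional): the repr printed above suggests agent.learn()\n",
+    "# returns a ShortTermMemory object with fields such as accuracy and updated_instructions.\n",
+    "# Note: calling learn() again re-runs the learning loop.\n",
+    "experience = agent.learn()\n",
+    "print(experience.accuracy)\n",
+    "print(experience.updated_instructions)"
+   ]
+  },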
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "4a876f3d",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "Total Agent Skills: 1\n",
+ "\n",
+ "product_category_classification\n",
+ "Categorize the input text into one of the following labels: ['Footwear/Clothing', 'Electronics', 'Food/Beverages', \n",
+ "'Furniture/Home Decor', 'Beauty/Personal Care']. Choose the label that best represents the main category of the \n",
+ "input text.\n",
+ "\n",
+ "Examples:\n",
+ "\n",
+ "Input: Laptop stand for the kitchen.\n",
+ "Output: Furniture/Home Decor\n",
+ "\n",
+ "Input: Running shoes for men.\n",
+ "Output: Footwear/Clothing\n",
+ "\n",
+ "Input: Organic shampoo for dry hair.\n",
+ "Output: Beauty/Personal Care\n",
+ "\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001b[1;34mTotal Agent Skills: \u001b[0m\u001b[1;34m1\u001b[0m\n",
+ "\n",
+ "\u001b[1;4;32mproduct_category_classification\u001b[0m\n",
+ "\u001b[32mCategorize the input text into one of the following labels: \u001b[0m\u001b[1;32m[\u001b[0m\u001b[32m'Footwear/Clothing'\u001b[0m\u001b[32m, \u001b[0m\u001b[32m'Electronics'\u001b[0m\u001b[32m, \u001b[0m\u001b[32m'Food/Beverages'\u001b[0m\u001b[32m, \u001b[0m\n",
+ "\u001b[32m'Furniture/Home Decor'\u001b[0m\u001b[32m, \u001b[0m\u001b[32m'Beauty/Personal Care'\u001b[0m\u001b[1;32m]\u001b[0m\u001b[32m. Choose the label that best represents the main category of the \u001b[0m\n",
+ "\u001b[32minput text.\u001b[0m\n",
+ "\n",
+ "\u001b[32mExamples:\u001b[0m\n",
+ "\n",
+ "\u001b[32mInput: Laptop stand for the kitchen.\u001b[0m\n",
+ "\u001b[32mOutput: Furniture/Home Decor\u001b[0m\n",
+ "\n",
+ "\u001b[32mInput: Running shoes for men.\u001b[0m\n",
+ "\u001b[32mOutput: Footwear/Clothing\u001b[0m\n",
+ "\n",
+ "\u001b[32mInput: Organic shampoo for dry hair.\u001b[0m\n",
+ "\u001b[32mOutput: Beauty/Personal Care\u001b[0m\n",
+ "\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from rich import print\n",
+ "\n",
+ "print(agent.skills)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "ee97ee22",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:02<00:00, 2.37it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_df = pd.DataFrame([\n",
+ " \"Stainless steel apple peeler.\", # Potential categories: Electronics or Food/Beverages\n",
+ " \"Silk finish touch screen.\", # Potential categories: Electronics or Beauty/Personal Care\n",
+ " \"Chocolate coated boots.\", # Potential categories: Footwear/Clothing or Food/Beverages\n",
+ " \"Natural wood fragrance.\", # Potential categories: Beauty/Personal Care or Furniture/Home Decor\n",
+ " \"Leather grain snack bar.\" # Potential categories: Footwear/Clothing or Food/Beverages\n",
+ "], columns=['text'])\n",
+ "\n",
+ "run = agent.apply_skills(test_df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "03cce2a7",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ " text product_category_classification \\\n",
+ "0 Stainless steel apple peeler. Food/Beverages \n",
+ "1 Silk finish touch screen. Electronics \n",
+ "2 Chocolate coated boots. Footwear/Clothing \n",
+ "3 Natural wood fragrance. Furniture/Home Decor \n",
+ "4 Leather grain snack bar. Food/Beverages \n",
+ "\n",
+ " score \n",
+ "0 {'Footwear/Clothing': -5.903179, 'Electronics'... \n",
+ "1 {'Footwear/Clothing': -11.517515, 'Electronics... \n",
+ "2 {'Footwear/Clothing': -0.074807025, 'Electroni... \n",
+ "3 {'Footwear/Clothing': -15.117043, 'Electronics... \n",
+ "4 {'Footwear/Clothing': -9.763915, 'Electronics'... "
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "run.predictions"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/classification_skill_with_CoT.ipynb b/examples/classification_skill_with_CoT.ipynb
new file mode 100644
index 00000000..d6e2a613
--- /dev/null
+++ b/examples/classification_skill_with_CoT.ipynb
@@ -0,0 +1,263 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "94ad15ac",
+ "metadata": {},
+ "source": [
+ "# Classification skill with Chain-of-Thoughts"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "a2f6d99b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ ],
+ "text/plain": [
+ " text category\n",
+ "0 Apple product with a sleek design. Electronics\n",
+ "1 Laptop stand for the kitchen. Furniture/Home Decor\n",
+ "2 Chocolate leather boots. Footwear/Clothing\n",
+ "3 Wooden cream for surfaces. Furniture/Home Decor\n",
+ "4 Natural finish for your lips. Beauty/Personal Care"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "df = pd.DataFrame([\n",
+ " {\"text\": \"Apple product with a sleek design.\", \"category\": \"Electronics\"},\n",
+ " {\"text\": \"Laptop stand for the kitchen.\", \"category\": \"Furniture/Home Decor\"},\n",
+ " {\"text\": \"Chocolate leather boots.\", \"category\": \"Footwear/Clothing\"},\n",
+ " {\"text\": \"Wooden cream for surfaces.\", \"category\": \"Furniture/Home Decor\"},\n",
+ " {\"text\": \"Natural finish for your lips.\", \"category\": \"Beauty/Personal Care\"}\n",
+ "])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "6ee2cebf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:00<00:00, 7.41it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ " text category \\\n",
+ "0 Apple product with a sleek design. Electronics \n",
+ "1 Laptop stand for the kitchen. Furniture/Home Decor \n",
+ "2 Chocolate leather boots. Footwear/Clothing \n",
+ "3 Wooden cream for surfaces. Furniture/Home Decor \n",
+ "4 Natural finish for your lips. Beauty/Personal Care \n",
+ "\n",
+ " rationale \\\n",
+ "0 \\nElectronics - The input mentions an \"Apple p... \n",
+ "1 \\nElectronics - The input mentions a specific ... \n",
+ "2 \\nFootwear/Clothing - The input text mentions ... \n",
+ "3 \\nLabel: Furniture/Home Decor\\nRationale: The ... \n",
+ "4 \\nLabel: Beauty/Personal Care\\n\\nRationale: Th... \n",
+ "\n",
+ " product_category_classification \\\n",
+ "0 Electronics \n",
+ "1 Electronics \n",
+ "2 Footwear/Clothing \n",
+ "3 Furniture/Home Decor \n",
+ "4 Beauty/Personal Care \n",
+ "\n",
+ " score \n",
+ "0 {'Footwear/Clothing': -10.126126, 'Electronics... \n",
+ "1 {'Footwear/Clothing': -11.511877, 'Electronics... \n",
+ "2 {'Footwear/Clothing': -0.05364191500000001, 'E... \n",
+ "3 {'Footwear/Clothing': -12.323324, 'Electronics... \n",
+ "4 {'Footwear/Clothing': -8.5240965, 'Electronics... "
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from adala.agents import Agent\n",
+ "from adala.skills.labeling.classification import ClassificationSkillWithCoT\n",
+ "\n",
+ "agent = Agent(\n",
+ " skills=ClassificationSkillWithCoT(\n",
+ " name='product_category_classification',\n",
+ " input_data_field='text',\n",
+ " labels=[\n",
+ " \"Footwear/Clothing\",\n",
+ " \"Electronics\",\n",
+ " \"Food/Beverages\",\n",
+ " \"Furniture/Home Decor\",\n",
+ " \"Beauty/Personal Care\"\n",
+ " ],\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "run = agent.apply_skills(df)\n",
+ "run.predictions"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/question_answering_skill.ipynb b/examples/question_answering_skill.ipynb
new file mode 100644
index 00000000..dd517618
--- /dev/null
+++ b/examples/question_answering_skill.ipynb
@@ -0,0 +1,244 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "94ad15ac",
+ "metadata": {},
+ "source": [
+ "# Question-answering skill"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "a2f6d99b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ " question \\\n",
+ "0 In quantum mechanics, what principle asserts t... \n",
+ "1 Which famous poet wrote 'The Love Song of J. A... \n",
+ "2 What mathematical theorem states that in any r... \n",
+ "3 Which philosophical paradox involves a ship wh... \n",
+ "4 In the world of programming, what is the desig... \n",
+ "\n",
+ " expected_answer \n",
+ "0 Heisenberg Uncertainty Principle \n",
+ "1 T.S. Eliot \n",
+ "2 Pythagorean Theorem \n",
+ "3 Ship of Theseus \n",
+ "4 Open/Closed Principle "
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "df = pd.DataFrame([\n",
+ " {\"question\": \"In quantum mechanics, what principle asserts that it's impossible to simultaneously know the exact position and momentum of a particle?\", \"expected_answer\": \"Heisenberg Uncertainty Principle\"},\n",
+ " {\"question\": \"Which famous poet wrote 'The Love Song of J. Alfred Prufrock'?\", \"expected_answer\": \"T.S. Eliot\"},\n",
+ " {\"question\": \"What mathematical theorem states that in any right-angled triangle, the area of the square whose side is the hypotenuse is equal to the sum of the areas of the squares whose sides are the two legs?\", \"expected_answer\": \"Pythagorean Theorem\"},\n",
+ " {\"question\": \"Which philosophical paradox involves a ship where all of its wooden parts are replaced with metal parts?\", \"expected_answer\": \"Ship of Theseus\"},\n",
+ " {\"question\": \"In the world of programming, what is the design principle that suggests a system should be open for extension but closed for modification?\", \"expected_answer\": \"Open/Closed Principle\"}\n",
+ "])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "6ee2cebf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:02<00:00, 1.91it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ " question \\\n",
+ "0 In quantum mechanics, what principle asserts t... \n",
+ "1 Which famous poet wrote 'The Love Song of J. A... \n",
+ "2 What mathematical theorem states that in any r... \n",
+ "3 Which philosophical paradox involves a ship wh... \n",
+ "4 In the world of programming, what is the desig... \n",
+ "\n",
+ " expected_answer \\\n",
+ "0 Heisenberg Uncertainty Principle \n",
+ "1 T.S. Eliot \n",
+ "2 Pythagorean Theorem \n",
+ "3 Ship of Theseus \n",
+ "4 Open/Closed Principle \n",
+ "\n",
+ " qa_skill \n",
+ "0 The Heisenberg uncertainty principle. \n",
+ "1 T.S. Eliot \n",
+ "2 The Pythagorean theorem. \n",
+ "3 The Ship of Theseus paradox. \n",
+ "4 The design principle is called the Open-Closed... "
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from adala.agents import Agent\n",
+ "from adala.skills.generation.qa import QuestionAnsweringSkill\n",
+ "\n",
+ "agent = Agent(\n",
+ " skills=QuestionAnsweringSkill(\n",
+ " name='qa_skill',\n",
+ " input_data_field='question'\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "run = agent.apply_skills(df)\n",
+ "run.predictions"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/quickstart.ipynb b/examples/quickstart.ipynb
new file mode 100644
index 00000000..3eab4978
--- /dev/null
+++ b/examples/quickstart.ipynb
@@ -0,0 +1,1264 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "a6c119c3",
+ "metadata": {},
+ "source": [
+ "# ADALA Quickstart\n",
+ "\n",
+ "In this notebook, we are going to run through some of the common tasks for creating data labeling agents with ADALA. In this example, we're going to create a data labeling agent for a text classification task - labeling our text samples as either \"Subjective or \"Objective\" statements. \n",
+ "\n",
+ "This agent will be LLM-based, so we will use [OpenAI's API](https://platform.openai.com/). You will to generate an API key and set it as an environment variable as follows: \n",
+ "\n",
+ "```\n",
+ "export OPENAI_API_KEY=your_openai_api_key\n",
+ "```\n",
+ "\n",
+ "Now, let's begin. "
+ ]
+ },
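+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "openai-key-env-sketch",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional hedged sketch: instead of exporting the variable in your shell, the key can\n",
+    "# be set from inside the notebook before creating the agent. Replace the placeholder\n",
+    "# with your real key.\n",
+    "import os\n",
+    "os.environ.setdefault('OPENAI_API_KEY', 'your_openai_api_key')"
+   ]
+  },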
+ {
+ "cell_type": "markdown",
+ "id": "55c19afc",
+ "metadata": {},
+ "source": [
+ "## Dataset Creation\n",
+ "First, let's use a dataset of product reviews stored in pandas dataframe. This will help us manage our data as we add more attributes, like predictions and labels for subjectivity and objectivity over time. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "5d5b37a3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ " text ground_truth\n",
+ "0 The mic is great. Subjective\n",
+ "1 Will order from them again! Subjective\n",
+ "2 Not loud enough and doesn't turn on like it sh... Objective\n",
+ "3 The phone doesn't seem to accept anything exce... Objective\n",
+ "4 All three broke within two months of use. Objective"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "df = pd.DataFrame([\n",
+ " [\"The mic is great.\", \"Subjective\"],\n",
+ " [\"Will order from them again!\", \"Subjective\"],\n",
+ " [\"Not loud enough and doesn't turn on like it should.\", \"Objective\"],\n",
+ " [\"The phone doesn't seem to accept anything except CBR mp3s\", \"Objective\"],\n",
+ " [\"All three broke within two months of use.\", \"Objective\"]\n",
+ "], columns=[\"text\", \"ground_truth\"])\n",
+ "\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9ce6651b",
+ "metadata": {},
+ "source": [
+ "We instantiate Dataset that uses this pandas dataframe as a data source. Dataset object takes care of input data schema and data streaming:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "93a31f60",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from adala.datasets import DataFrameDataset\n",
+ "\n",
+ "dataset = DataFrameDataset(df=df)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0dc201b3",
+ "metadata": {},
+ "source": [
+ "## Create Agent\n",
+ "\n",
+ "To create Agent, we need to to define 2 things:\n",
+ "\n",
+ "**Skills** - Agent's abilities are defined as _Skills_. Each agent can possess many different skills. In our case, this agent only has one labeling skill, to produce a classification of Subjective or Objective for a given piece of text. To define this skill, we will leverage an LLM, passing it instructions and the set of labeles we expect to receive back. \n",
+ "\n",
+ "**Environment** - that is where the Agent receives ground truth signal to improve its skill. Since we already created ground truth dataset, we can simply refer to the column from the dataframe. In the real world scenario, you may consider using a different environment where ground truth signal can be obtained asynchoronously by gathering real human feedback during agent's learning phase."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "a1310fce",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "Agent Instance\n",
+ "\n",
+ "Environment: BasicEnvironment\n",
+ "Skills: subjectivity_detection\n",
+ "Runtimes: openai, openai-gpt3, openai-gpt4\n",
+ "Default Runtime: openai\n",
+ "Default Teacher Runtime: openai-gpt4\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001B[1;34mAgent Instance\u001B[0m\n",
+ "\n",
+ "Environment: BasicEnvironment\n",
+ "Skills: subjectivity_detection\n",
+ "Runtimes: openai, openai-gpt3, openai-gpt4\n",
+ "Default Runtime: openai\n",
+ "Default Teacher Runtime: openai-gpt4\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from adala.agents import Agent\n",
+ "from adala.environments import BasicEnvironment\n",
+ "from adala.skills import ClassificationSkill\n",
+ "from adala.runtimes import OpenAIRuntime\n",
+ "from rich import print\n",
+ "\n",
+ "\n",
+ "agent = Agent(\n",
+ " # define the agent's labeling skill that should classify text onto 2 categories\n",
+ " skills=ClassificationSkill(\n",
+ " name='subjectivity_detection',\n",
+ " description='Understanding subjective and objective statements from text.',\n",
+ " instructions='Classify a product review as either expressing \"Subjective\" or \"Objective\" statements.',\n",
+ " labels=['Subjective', 'Objective'],\n",
+ " input_data_field='text'\n",
+ " ),\n",
+ " \n",
+ " # basic environment extracts ground truth signal from the input records\n",
+ " environment=BasicEnvironment(\n",
+ " ground_truth_dataset=dataset,\n",
+ " ground_truth_column='ground_truth'\n",
+ " ),\n",
+ " \n",
+ " runtimes = {\n",
+ " # You can specify your OPENAI API KEY here via `OpenAIRuntime(..., api_key='your-api-key')`\n",
+ " 'openai': OpenAIRuntime(model='gpt-3.5-turbo-instruct'),\n",
+ " 'openai-gpt3': OpenAIRuntime(model='gpt-3.5-turbo'),\n",
+ " 'openai-gpt4': OpenAIRuntime(model='gpt-4'),\n",
+ " },\n",
+ " default_runtime='openai',\n",
+ " \n",
+ " # NOTE! If you don't have an access to gpt4 - replace it with \"openai-gpt3\"\n",
+ " default_teacher_runtime='openai-gpt4'\n",
+ ")\n",
+ "\n",
+ "print(agent)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8340dde8",
+ "metadata": {},
+ "source": [
+ "## Learning Agent\n",
+ "\n",
+ "We will now let Agent learn from the ground truth. After every action, Agent returns its _Experience_, where it stores various observations like predicted data, errors, accuracy, etc."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "666c8d0f",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:00<00:00, 39.18it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "=> Iteration #0: Comparing to ground truth, analyzing and improving ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\n",
+ "\n",
+ "=> Iteration #\u001B[1;36m0\u001B[0m: Comparing to ground truth, analyzing and improving \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Comparing predictions to ground truth data ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Comparing predictions to ground truth data \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ " \n",
+ " text ground_truth subjectivity_detection score ground_truth__x__subโฆ \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " The mic is great. Subjective Subjective {'Subjective': True \n",
+ " -0.02697588099999997, \n",
+ " 'Objective': \n",
+ " -3.6262724} \n",
+ " Will order from them Subjective Subjective {'Subjective': True \n",
+ " again! -0.11282212000000001, \n",
+ " 'Objective': \n",
+ " -2.2378219999999995} \n",
+ " Not loud enough and Objective Subjective {'Subjective': False \n",
+ " doesn't turn on like -0.014163457000000034, \n",
+ " it should. 'Objective': \n",
+ " -4.2641635} \n",
+ " The phone doesn't seem Objective Objective {'Subjective': True \n",
+ " to accept anything -2.0720863, \n",
+ " except CBR mp3s 'Objective': \n",
+ " -0.13458653999999995} \n",
+ " All three broke within Objective Objective {'Subjective': True \n",
+ " two months of use. -2.1821797, \n",
+ " 'Objective': \n",
+ " -0.11967964500000007} \n",
+ " \n",
+    "\n"
+ ],
+ "text/plain": [
+ " \n",
+ " \u001B[1;35m \u001B[0m\u001B[1;35mtext \u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mground_truth\u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35msubjectivity_detection\u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mscore \u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mground_truth__x__subโฆ\u001B[0m\u001B[1;35m \u001B[0m \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " The mic is great. Subjective Subjective {'Subjective': True \n",
+ " -0.02697588099999997, \n",
+ " 'Objective': \n",
+ " -3.6262724} \n",
+ " \u001B[2m \u001B[0m\u001B[2mWill order from them \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mSubjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mSubjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m{'Subjective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mTrue \u001B[0m\u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2magain! \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-0.11282212000000001, \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m'Objective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-2.2378219999999995} \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " Not loud enough and Objective Subjective {'Subjective': False \n",
+ " doesn't turn on like -0.014163457000000034, \n",
+ " it should. 'Objective': \n",
+ " -4.2641635} \n",
+ " \u001B[2m \u001B[0m\u001B[2mThe phone doesn't seem\u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mObjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mObjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m{'Subjective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mTrue \u001B[0m\u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2mto accept anything \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-2.0720863, \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2mexcept CBR mp3s \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m'Objective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-0.13458653999999995} \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " All three broke within Objective Objective {'Subjective': True \n",
+ " two months of use. -2.1821797, \n",
+ " 'Objective': \n",
+ " -0.11967964500000007} \n",
+ " \n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Analyze evaluation experience ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Analyze evaluation experience \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโ| 1/1 [00:00<00:00, 153.47it/s]\n",
+ "100%|โโโโโโโโโโโโโโโโโ| 1/1 [00:00<00:00, 31.21it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "Number of errors: 1\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Number of errors: \u001B[1;36m1\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Accuracy = 80.00%\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001B[1;31mAccuracy = \u001B[0m\u001B[1;36m80.00\u001B[0m\u001B[1;31m%\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Improve \"subjectivity_detection\" skill based on analysis ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Improve \u001B[32m\"subjectivity_detection\"\u001B[0m skill based on analysis \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Updated instructions for skill \"subjectivity_detection\":\n",
+ "\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Updated instructions for skill \u001B[32m\"subjectivity_detection\"\u001B[0m:\n",
+ "\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Determine whether the given product review contains \"Subjective\" (based on personal feelings, tastes, or opinions) \n",
+ "or \"Objective\" (based on facts) statements.\n",
+ "\n",
+ "Examples:\n",
+ "\n",
+ "Input: Not loud enough and doesn't turn on like it should.\n",
+ "Output: Objective\n",
+ "\n",
+ "Input: I personally think the sound quality is not up to the mark.\n",
+ "Output: Subjective\n",
+ "\n",
+ "Input: The phone's battery lasts for 10 hours.\n",
+ "Output: Objective\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001B[1;32mDetermine whether the given product review contains \u001B[0m\u001B[32m\"Subjective\"\u001B[0m\u001B[1;32m \u001B[0m\u001B[1;32m(\u001B[0m\u001B[1;32mbased on personal feelings, tastes, or opinions\u001B[0m\u001B[1;32m)\u001B[0m\u001B[1;32m \u001B[0m\n",
+ "\u001B[1;32mor \u001B[0m\u001B[32m\"Objective\"\u001B[0m\u001B[1;32m \u001B[0m\u001B[1;32m(\u001B[0m\u001B[1;32mbased on facts\u001B[0m\u001B[1;32m)\u001B[0m\u001B[1;32m statements.\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mExamples:\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: Not loud enough and doesn't turn on like it should.\u001B[0m\n",
+ "\u001B[1;32mOutput: Objective\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: I personally think the sound quality is not up to the mark.\u001B[0m\n",
+ "\u001B[1;32mOutput: Subjective\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: The phone's battery lasts for \u001B[0m\u001B[1;36m10\u001B[0m\u001B[1;32m hours.\u001B[0m\n",
+ "\u001B[1;32mOutput: Objective\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Re-apply subjectivity_detection skill to dataset ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Re-apply subjectivity_detection skill to dataset \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:00<00:00, 48.32it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "=> Iteration #1: Comparing to ground truth, analyzing and improving ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\n",
+ "\n",
+ "=> Iteration #\u001B[1;36m1\u001B[0m: Comparing to ground truth, analyzing and improving \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Comparing predictions to ground truth data ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Comparing predictions to ground truth data \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ " \n",
+ " text ground_truth subjectivity_detection score ground_truth__x__subโฆ \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " The mic is great. Subjective Objective {'Subjective': False \n",
+ " -2.2253392, \n",
+ " 'Objective': \n",
+ " -0.11432376000000005} \n",
+ " Will order from them Subjective Objective {'Subjective': False \n",
+ " again! -0.8573844400000001, \n",
+ " 'Objective': \n",
+ " -0.5521171} \n",
+ " Not loud enough and Objective Objective {'Subjective': True \n",
+ " doesn't turn on like -4.0895286, \n",
+ " it should. 'Objective': \n",
+ " -0.01688896000000003} \n",
+ " The phone doesn't seem Objective Objective {'Subjective': True \n",
+ " to accept anything -2.8614092, \n",
+ " except CBR mp3s 'Objective': \n",
+ " -0.058888500000000066} \n",
+ " All three broke within Objective Objective {'Subjective': True \n",
+ " two months of use. -4.7739024, \n",
+ " 'Objective': \n",
+ " -0.008483256000000052} \n",
+ " \n",
+    "\n"
+ ],
+ "text/plain": [
+ " \n",
+ " \u001B[1;35m \u001B[0m\u001B[1;35mtext \u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mground_truth\u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35msubjectivity_detection\u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mscore \u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mground_truth__x__subโฆ\u001B[0m\u001B[1;35m \u001B[0m \n",
+ " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ \n",
+ " The mic is great. Subjective Objective {'Subjective': False \n",
+ " -2.2253392, \n",
+ " 'Objective': \n",
+ " -0.11432376000000005} \n",
+ " \u001B[2m \u001B[0m\u001B[2mWill order from them \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mSubjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mObjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m{'Subjective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mFalse \u001B[0m\u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2magain! \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-0.8573844400000001, \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m'Objective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-0.5521171} \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " Not loud enough and Objective Objective {'Subjective': True \n",
+ " doesn't turn on like -4.0895286, \n",
+ " it should. 'Objective': \n",
+ " -0.01688896000000003} \n",
+ " \u001B[2m \u001B[0m\u001B[2mThe phone doesn't seem\u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mObjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mObjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m{'Subjective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mTrue \u001B[0m\u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2mto accept anything \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-2.8614092, \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2mexcept CBR mp3s \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m'Objective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-0.058888500000000066}\u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " All three broke within Objective Objective {'Subjective': True \n",
+ " two months of use. -4.7739024, \n",
+ " 'Objective': \n",
+ " -0.008483256000000052} \n",
+ " \n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Analyze evaluation experience ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Analyze evaluation experience \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโ| 2/2 [00:00<00:00, 229.64it/s]\n",
+ "100%|โโโโโโโโโโโโโโโโโ| 2/2 [00:00<00:00, 24.71it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "Number of errors: 2\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Number of errors: \u001B[1;36m2\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Accuracy = 60.00%\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001B[1;31mAccuracy = \u001B[0m\u001B[1;36m60.00\u001B[0m\u001B[1;31m%\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Improve \"subjectivity_detection\" skill based on analysis ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Improve \u001B[32m\"subjectivity_detection\"\u001B[0m skill based on analysis \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Updated instructions for skill \"subjectivity_detection\":\n",
+ "\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Updated instructions for skill \u001B[32m\"subjectivity_detection\"\u001B[0m:\n",
+ "\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Identify if the provided product review is \"Subjective\" (expressing personal feelings, tastes, or opinions) or \n",
+ "\"Objective\" (based on factual information). Consider a statement as subjective if it reflects personal judgment or \n",
+ "preference, and as objective if it states verifiable facts or features.\n",
+ "\n",
+ "Examples:\n",
+ "\n",
+ "Input: Not loud enough and doesn't turn on like it should.\n",
+ "Output: Objective\n",
+ "\n",
+ "Input: I personally think the sound quality is not up to the mark.\n",
+ "Output: Subjective\n",
+ "\n",
+ "Input: The phone's battery lasts for 10 hours.\n",
+ "Output: Objective\n",
+ "\n",
+ "Input: The mic is great.\n",
+ "Output: Subjective\n",
+ "\n",
+ "Input: Will order from them again!\n",
+ "Output: Subjective\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\u001B[1;32mIdentify if the provided product review is \u001B[0m\u001B[32m\"Subjective\"\u001B[0m\u001B[1;32m \u001B[0m\u001B[1;32m(\u001B[0m\u001B[1;32mexpressing personal feelings, tastes, or opinions\u001B[0m\u001B[1;32m)\u001B[0m\u001B[1;32m or \u001B[0m\n",
+ "\u001B[32m\"Objective\"\u001B[0m\u001B[1;32m \u001B[0m\u001B[1;32m(\u001B[0m\u001B[1;32mbased on factual information\u001B[0m\u001B[1;32m)\u001B[0m\u001B[1;32m. Consider a statement as subjective if it reflects personal judgment or \u001B[0m\n",
+ "\u001B[1;32mpreference, and as objective if it states verifiable facts or features.\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mExamples:\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: Not loud enough and doesn't turn on like it should.\u001B[0m\n",
+ "\u001B[1;32mOutput: Objective\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: I personally think the sound quality is not up to the mark.\u001B[0m\n",
+ "\u001B[1;32mOutput: Subjective\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: The phone's battery lasts for \u001B[0m\u001B[1;36m10\u001B[0m\u001B[1;32m hours.\u001B[0m\n",
+ "\u001B[1;32mOutput: Objective\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: The mic is great.\u001B[0m\n",
+ "\u001B[1;32mOutput: Subjective\u001B[0m\n",
+ "\n",
+ "\u001B[1;32mInput: Will order from them again!\u001B[0m\n",
+ "\u001B[1;32mOutput: Subjective\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Re-apply subjectivity_detection skill to dataset ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Re-apply subjectivity_detection skill to dataset \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|โโโโโโโโโโโโโโโโโ| 5/5 [00:00<00:00, 35.93it/s]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "=> Iteration #2: Comparing to ground truth, analyzing and improving ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "\n",
+ "\n",
+ "=> Iteration #\u001B[1;36m2\u001B[0m: Comparing to ground truth, analyzing and improving \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Comparing predictions to ground truth data ...\n",
+    "\n"
+ ],
+ "text/plain": [
+ "Comparing predictions to ground truth data \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ " \n",
+ "  text                     ground_truth   subjectivity_detection   score                     ground_truth__x__sub…  \n",
+ " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ \n",
+ " The mic is great. Subjective Subjective {'Subjective': True \n",
+ " -0.022607480000000055, \n",
+ " 'Objective': -3.80076} \n",
+ " Will order from them Subjective Subjective {'Subjective': True \n",
+ " again! -0.05627503599999997, \n",
+ " 'Objective': \n",
+ " -2.9055107} \n",
+ " Not loud enough and Objective Objective {'Subjective': True \n",
+ " doesn't turn on like -2.897738, \n",
+ " it should. 'Objective': \n",
+ " -0.05672692499999995} \n",
+ " The phone doesn't seem Objective Objective {'Subjective': True \n",
+ " to accept anything -3.8168292, \n",
+ " except CBR mp3s 'Objective': \n",
+ " -0.022242965000000038} \n",
+ " All three broke within Objective Objective {'Subjective': True \n",
+ " two months of use. -4.800799, \n",
+ " 'Objective': \n",
+ " -0.008257226000000043} \n",
+ " \n",
+ "\n"
+ ],
+ "text/plain": [
+ " \n",
+ " \u001B[1;35m \u001B[0m\u001B[1;35mtext \u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mground_truth\u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35msubjectivity_detection\u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mscore \u001B[0m\u001B[1;35m \u001B[0m \u001B[1;35m \u001B[0m\u001B[1;35mground_truth__x__sub…\u001B[0m\u001B[1;35m \u001B[0m \n",
+ " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ \n",
+ " The mic is great. Subjective Subjective {'Subjective': True \n",
+ " -0.022607480000000055, \n",
+ " 'Objective': -3.80076} \n",
+ " \u001B[2m \u001B[0m\u001B[2mWill order from them \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mSubjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mSubjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m{'Subjective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mTrue \u001B[0m\u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2magain! \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-0.05627503599999997, \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m'Objective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-2.9055107} \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " Not loud enough and Objective Objective {'Subjective': True \n",
+ " doesn't turn on like -2.897738, \n",
+ " it should. 'Objective': \n",
+ " -0.05672692499999995} \n",
+ " \u001B[2m \u001B[0m\u001B[2mThe phone doesn't seem\u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mObjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mObjective \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m{'Subjective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2mTrue \u001B[0m\u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2mto accept anything \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-3.8168292, \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m\u001B[2mexcept CBR mp3s \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m'Objective': \u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m \u001B[2m \u001B[0m\u001B[2m-0.022242965000000038}\u001B[0m\u001B[2m \u001B[0m \u001B[2m \u001B[0m \n",
+ " All three broke within Objective Objective {'Subjective': True \n",
+ " two months of use. -4.800799, \n",
+ " 'Objective': \n",
+ " -0.008257226000000043} \n",
+ " \n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Analyze evaluation experience ...\n",
+ "\n"
+ ],
+ "text/plain": [
+ "Analyze evaluation experience \u001B[33m...\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Number of errors: 0\n",
+ "\n"
+ ],
+ "text/plain": [
+ "Number of errors: \u001B[1;36m0\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Accuracy = 100.00%\n",
+ "\n"
+ ],
+ "text/plain": [
+ "\u001B[1;31mAccuracy = \u001B[0m\u001B[1;36m100.00\u001B[0m\u001B[1;31m%\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Accuracy threshold reached (1.0 >= 0.95)\n",
+ "\n"
+ ],
+ "text/plain": [
+ "Accuracy threshold reached \u001B[1m(\u001B[0m\u001B[1;36m1.0\u001B[0m >= \u001B[1;36m0.95\u001B[0m\u001B[1m)\u001B[0m\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "Train is done!\n",
+ "\n"
+ ],
+ "text/plain": [
+ "Train is done!\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "learning_experience = agent.learn(learning_iterations=3, accuracy_threshold=0.95)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ee1573e3",
+ "metadata": {},
+ "source": [
+ "Let's see the final instructions:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "f5b67bd4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "Total Agent Skills: 1\n",
+ "\n",
+ "subjectivity_detection\n",
+ "Identify if the provided product review is \"Subjective\" (expressing personal feelings, tastes, or opinions) or \n",
+ "\"Objective\" (based on factual information). Consider a statement as subjective if it reflects personal judgment or \n",
+ "preference, and as objective if it states verifiable facts or features.\n",
+ "\n",
+ "Examples:\n",
+ "\n",
+ "Input: Not loud enough and doesn't turn on like it should.\n",
+ "Output: Objective\n",
+ "\n",
+ "Input: I personally think the sound quality is not up to the mark.\n",
+ "Output: Subjective\n",
+ "\n",
+ "Input: The phone's battery lasts for 10 hours.\n",
+ "Output: Objective\n",
+ "\n",
+ "Input: The mic is great.\n",
+ "Output: Subjective\n",
+ "\n",
+ "Input: Will order from them again!\n",
+ "Output: Subjective\n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ "\u001B[1;34mTotal Agent Skills: \u001B[0m\u001B[1;34m1\u001B[0m\n",
+ "\n",
+ "\u001B[1;4;32msubjectivity_detection\u001B[0m\n",
+ "\u001B[32mIdentify if the provided product review is \u001B[0m\u001B[32m\"Subjective\"\u001B[0m\u001B[32m \u001B[0m\u001B[1;32m(\u001B[0m\u001B[32mexpressing personal feelings, tastes, or opinions\u001B[0m\u001B[1;32m)\u001B[0m\u001B[32m or \u001B[0m\n",
+ "\u001B[32m\"Objective\"\u001B[0m\u001B[32m \u001B[0m\u001B[1;32m(\u001B[0m\u001B[32mbased on factual information\u001B[0m\u001B[1;32m)\u001B[0m\u001B[32m. Consider a statement as subjective if it reflects personal judgment or \u001B[0m\n",
+ "\u001B[32mpreference, and as objective if it states verifiable facts or features.\u001B[0m\n",
+ "\n",
+ "\u001B[32mExamples:\u001B[0m\n",
+ "\n",
+ "\u001B[32mInput: Not loud enough and doesn't turn on like it should.\u001B[0m\n",
+ "\u001B[32mOutput: Objective\u001B[0m\n",
+ "\n",
+ "\u001B[32mInput: I personally think the sound quality is not up to the mark.\u001B[0m\n",
+ "\u001B[32mOutput: Subjective\u001B[0m\n",
+ "\n",
+ "\u001B[32mInput: The phone's battery lasts for \u001B[0m\u001B[1;32m10\u001B[0m\u001B[32m hours.\u001B[0m\n",
+ "\u001B[32mOutput: Objective\u001B[0m\n",
+ "\n",
+ "\u001B[32mInput: The mic is great.\u001B[0m\n",
+ "\u001B[32mOutput: Subjective\u001B[0m\n",
+ "\n",
+ "\u001B[32mInput: Will order from them again!\u001B[0m\n",
+ "\u001B[32mOutput: Subjective\u001B[0m\n",
+ "\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "print(agent.skills)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "54ec4568",
+ "metadata": {},
+ "source": [
+ "... and predictions created by the skill:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "baa69db8",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " ground_truth | \n",
+ " subjectivity_detection | \n",
+ " score | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " The mic is great. | \n",
+ " Subjective | \n",
+ " Subjective | \n",
+ " {'Subjective': -0.022607480000000055, 'Objecti... | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " Will order from them again! | \n",
+ " Subjective | \n",
+ " Subjective | \n",
+ " {'Subjective': -0.05627503599999997, 'Objectiv... | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " Not loud enough and doesn't turn on like it sh... | \n",
+ " Objective | \n",
+ " Objective | \n",
+ " {'Subjective': -2.897738, 'Objective': -0.0567... | \n",
+ "\n",
+ " \n",
+ " 3 | \n",
+ " The phone doesn't seem to accept anything exce... | \n",
+ " Objective | \n",
+ " Objective | \n",
+ " {'Subjective': -3.8168292, 'Objective': -0.022... | \n",
+ "\n",
+ " \n",
+ " 4 | \n",
+ " All three broke within two months of use. | \n",
+ " Objective | \n",
+ " Objective | \n",
+ " {'Subjective': -4.800799, 'Objective': -0.0082... | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text ground_truth \\\n",
+ "0 The mic is great. Subjective \n",
+ "1 Will order from them again! Subjective \n",
+ "2 Not loud enough and doesn't turn on like it sh... Objective \n",
+ "3 The phone doesn't seem to accept anything exce... Objective \n",
+ "4 All three broke within two months of use. Objective \n",
+ "\n",
+ " subjectivity_detection score \n",
+ "0 Subjective {'Subjective': -0.022607480000000055, 'Objecti... \n",
+ "1 Subjective {'Subjective': -0.05627503599999997, 'Objectiv... \n",
+ "2 Objective {'Subjective': -2.897738, 'Objective': -0.0567... \n",
+ "3 Objective {'Subjective': -3.8168292, 'Objective': -0.022... \n",
+ "4 Objective {'Subjective': -4.800799, 'Objective': -0.0082... "
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "learning_experience.predictions"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b8d49385",
+ "metadata": {},
+ "source": [
+ "## Applying learned skills to the real data\n",
+ "\n",
+ "Now that our Agent has an evolved \"subjectivity detection\" skill, we can apply it to a real dataset without ground truth data:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "60a79462",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " Doesn't hold charge. | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " Excellent bluetooth headset | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " I love this thing! | \n",
+ "\n",
+ " \n",
+ " 3 | \n",
+ " VERY DISAPPOINTED. | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text\n",
+ "0 Doesn't hold charge.\n",
+ "1 Excellent bluetooth headset\n",
+ "2 I love this thing!\n",
+ "3 VERY DISAPPOINTED."
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "test_df = pd.DataFrame([\n",
+ " \"Doesn't hold charge.\",\n",
+ " \"Excellent bluetooth headset\",\n",
+ " \"I love this thing!\",\n",
+ " \"VERY DISAPPOINTED.\"\n",
+ "], columns=['text'])\n",
+ "test_df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "2f2bf273",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|█████████████████| 4/4 [00:00<00:00, 32.32it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "result = agent.apply_skills(test_df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "e6c50ede",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " subjectivity_detection | \n",
+ " score | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " Doesn't hold charge. | \n",
+ " Objective | \n",
+ " {'Subjective': -4.9062243, 'Objective': -0.007... | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " Excellent bluetooth headset | \n",
+ " Objective | \n",
+ " {'Subjective': -1.450324, 'Objective': -0.2672... | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " I love this thing! | \n",
+ " Subjective | \n",
+ " {'Subjective': -0.0014673689999999905, 'Object... | \n",
+ "\n",
+ " \n",
+ " 3 | \n",
+ " VERY DISAPPOINTED. | \n",
+ " Subjective | \n",
+ " {'Subjective': -0.17851222999999997, 'Objectiv... | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text subjectivity_detection \\\n",
+ "0 Doesn't hold charge. Objective \n",
+ "1 Excellent bluetooth headset Objective \n",
+ "2 I love this thing! Subjective \n",
+ "3 VERY DISAPPOINTED. Subjective \n",
+ "\n",
+ " score \n",
+ "0 {'Subjective': -4.9062243, 'Objective': -0.007... \n",
+ "1 {'Subjective': -1.450324, 'Objective': -0.2672... \n",
+ "2 {'Subjective': -0.0014673689999999905, 'Object... \n",
+ "3 {'Subjective': -0.17851222999999997, 'Objectiv... "
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "result.predictions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0922915b",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
\ No newline at end of file
diff --git a/examples/summarization_skill.ipynb b/examples/summarization_skill.ipynb
new file mode 100644
index 00000000..27c47923
--- /dev/null
+++ b/examples/summarization_skill.ipynb
@@ -0,0 +1,192 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "94ad15ac",
+ "metadata": {},
+ "source": [
+ "# Summarization skill"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "a2f6d99b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " Caffeine comes from coffee beans, but it can a... | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " Vitamin C is a water-soluble essential vitamin... | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " Vitamin D is a fat-soluble nutrient. It is one... | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text\n",
+ "0 Caffeine comes from coffee beans, but it can a...\n",
+ "1 Vitamin C is a water-soluble essential vitamin...\n",
+ "2 Vitamin D is a fat-soluble nutrient. It is one..."
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "df = pd.DataFrame([\n",
+ " \"Caffeine comes from coffee beans, but it can also be synthesized in a laboratory. It has the same structure whether it’s in coffee, energy drinks, tea, or pills. Caffeine is a powerful stimulant, and it can be used to improve physical strength and endurance. It is classified as a nootropic because it sensitizes neurons and provides mental stimulation. Habitual caffeine use is also associated with a reduced risk of Alzheimer's disease, cirrhosis, and liver cancer. Caffeine’s main mechanism concerns antagonizing adenosine receptors. Adenosine causes sedation and relaxation when it acts upon its receptors, located in the brain. Caffeine prevents this action and causes alertness and wakefulness. This inhibition of adenosine can influence the dopamine, serotonin, acetylcholine, and adrenaline systems. For practical tips on the optimal use of caffeine, check out our Supplement Guides.\",\n",
+ " \"Vitamin C is a water-soluble essential vitamin that can be found in fruits and vegetables, especially citrus. Humans are unable to synthesize vitamin C from their bodies, so it must be acquired through dietary intake. Vitamin C is important for immune system function and is a powerful antioxidant. It also acts as a cofactor for collagen synthesis.[2]. People often supplement with vitamin C when they have a cold. According to various studies, vitamin C may be effective in reducing the duration of a cold, but does not seem to reduce the frequency of colds in a population.[3][4] The available literature suggests that a dose ranging from 200 mg to 2,000 mg could be beneficial for reducing cold duration.Often utilized for its antioxidant effects, vitamin C has been studied for its potential role in Alzheimer’s disease and cancer. Lower vitamin C levels are present in people with Alzheimer’s, even with adequate dietary intake.[5] It is thought that oxidative stress plays a major role in the pathogenesis of the disease, so vitamin C’s antioxidative effects could be beneficial.[6][7] In rodent studies, oral vitamin C was able to reduce oxidative and inflammatory biomarkers.[8] In recent cancer research, vitamin C was found to promote oxidative stress in cancer cells, leading to cytotoxic effects at high doses in mice.[9] While promising, further research and human studies are required to determine efficacy.\",\n",
+ " \"Vitamin D is a fat-soluble nutrient. It is one of the 24 micronutrients critical for human survival. The sun is the major natural source through eliciting vitamin D production in the skin, but vitamin D is also found naturally in oily fish and eggs and is added to milk and milk alternatives. Supplemental vitamin D is associated with a range of benefits, including improved immune health, bone health, and well-being. Supplementation may also reduce the risk of cancer mortality, diabetes, and multiple sclerosis.The effects of vitamin D likely depend on a person’s circulating levels of 25-hydroxyvitamin D (25(OH)D; a form of vitamin D that is measured in blood samples to determine vitamin D status), and many of its benefits will only be seen when a deficiency is reversed.\"\n",
+ "], columns=['text'])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "6ee2cebf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|█████████████████| 3/3 [00:05<00:00, 1.73s/it]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " summarization | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " Caffeine comes from coffee beans, but it can a... | \n",
+ " \\nCaffeine is a stimulant found in coffee bean... | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " Vitamin C is a water-soluble essential vitamin... | \n",
+ " \\nVitamin C is an essential water-soluble vita... | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " Vitamin D is a fat-soluble nutrient. It is one... | \n",
+ " \\nVitamin D is a fat-soluble nutrient that is ... | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text \\\n",
+ "0 Caffeine comes from coffee beans, but it can a... \n",
+ "1 Vitamin C is a water-soluble essential vitamin... \n",
+ "2 Vitamin D is a fat-soluble nutrient. It is one... \n",
+ "\n",
+ " summarization \n",
+ "0 \\nCaffeine is a stimulant found in coffee bean... \n",
+ "1 \\nVitamin C is an essential water-soluble vita... \n",
+ "2 \\nVitamin D is a fat-soluble nutrient that is ... "
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from adala.agents import Agent\n",
+ "from adala.skills.generation.summarization import SummarizationSkill\n",
+ "\n",
+ "agent = Agent(\n",
+ " skills=SummarizationSkill(\n",
+ " name='summarization',\n",
+ " input_data_field='text'\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "run = agent.apply_skills(df)\n",
+ "run.predictions"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/text_generation_skill.ipynb b/examples/text_generation_skill.ipynb
new file mode 100644
index 00000000..88eb374a
--- /dev/null
+++ b/examples/text_generation_skill.ipynb
@@ -0,0 +1,338 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "94ad15ac",
+ "metadata": {},
+ "source": [
+ "# Text generation skill"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "a2f6d99b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " completion_target | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " Imagine a fusion of traditional retail with vi... | \n",
+ " In this futuristic retail scenario, customers ... | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " Draft a message to shareholders highlighting o... | \n",
+ " Dear valued shareholders, As we look ahead, ou... | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " Describe the office of the future in a post-pa... | \n",
+ " The office of the future is a hybrid space tha... | \n",
+ "\n",
+ " \n",
+ " 3 | \n",
+ " How can we integrate AI into our customer serv... | \n",
+ " Integrating AI into customer service can be ac... | \n",
+ "\n",
+ " \n",
+ " 4 | \n",
+ " Provide a vision statement for a tech company ... | \n",
+ " Empowering every individual, no matter their l... | \n",
+ "\n",
+ " \n",
+ " 5 | \n",
+ " Suggest a strategy to improve employee wellnes... | \n",
+ " Promote regular digital detox hours, provide e... | \n",
+ "\n",
+ " \n",
+ " 6 | \n",
+ " How can businesses benefit from embracing bloc... | \n",
+ " By adopting blockchain, businesses can achieve... | \n",
+ "\n",
+ " \n",
+ " 7 | \n",
+ " Draft a mission statement for a startup focuse... | \n",
+ " Transforming discarded materials into timeless... | \n",
+ "\n",
+ " \n",
+ " 8 | \n",
+ " Propose an innovative method for businesses to... | \n",
+ " Harness augmented reality (AR) to create immer... | \n",
+ "\n",
+ " \n",
+ " 9 | \n",
+ " How can companies ensure data privacy in the a... | \n",
+ " Companies can adopt zero-knowledge proofs, dec... | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text \\\n",
+ "0 Imagine a fusion of traditional retail with vi... \n",
+ "1 Draft a message to shareholders highlighting o... \n",
+ "2 Describe the office of the future in a post-pa... \n",
+ "3 How can we integrate AI into our customer serv... \n",
+ "4 Provide a vision statement for a tech company ... \n",
+ "5 Suggest a strategy to improve employee wellnes... \n",
+ "6 How can businesses benefit from embracing bloc... \n",
+ "7 Draft a mission statement for a startup focuse... \n",
+ "8 Propose an innovative method for businesses to... \n",
+ "9 How can companies ensure data privacy in the a... \n",
+ "\n",
+ " completion_target \n",
+ "0 In this futuristic retail scenario, customers ... \n",
+ "1 Dear valued shareholders, As we look ahead, ou... \n",
+ "2 The office of the future is a hybrid space tha... \n",
+ "3 Integrating AI into customer service can be ac... \n",
+ "4 Empowering every individual, no matter their l... \n",
+ "5 Promote regular digital detox hours, provide e... \n",
+ "6 By adopting blockchain, businesses can achieve... \n",
+ "7 Transforming discarded materials into timeless... \n",
+ "8 Harness augmented reality (AR) to create immer... \n",
+ "9 Companies can adopt zero-knowledge proofs, dec... "
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "df = pd.DataFrame([\n",
+ " {\"text\": \"Imagine a fusion of traditional retail with virtual reality. Describe the customer experience.\", \"completion_target\": \"In this futuristic retail scenario, customers wear VR headsets as they enter physical stores. They can instantly see personalized recommendations, virtual try-ons, and immersive product demos, blending the tactile benefits of in-person shopping with the endless possibilities of the digital realm.\"},\n",
+ " \n",
+ " {\"text\": \"Draft a message to shareholders highlighting our commitment to sustainable practices.\", \"completion_target\": \"Dear valued shareholders, As we look ahead, our commitment to sustainable and eco-friendly practices remains unwavering. We understand the critical importance of conserving our planet for future generations and are continually implementing innovative solutions to reduce our carbon footprint and promote ethical operations.\"},\n",
+ " \n",
+ " {\"text\": \"Describe the office of the future in a post-pandemic world.\", \"completion_target\": \"The office of the future is a hybrid space that seamlessly integrates remote and in-person work. Enhanced with smart technology, it's equipped for virtual collaborations and is adaptable to individual needs, prioritizing health, safety, and flexibility.\"},\n",
+ " \n",
+ " {\"text\": \"How can we integrate AI into our customer service without losing the human touch?\", \"completion_target\": \"Integrating AI into customer service can be achieved by using chatbots for routine queries and ensuring human agents handle more complex or emotional issues. Training AI with empathy in responses, while always providing an option for customers to connect with a human, can strike the right balance.\"},\n",
+ " \n",
+ " {\"text\": \"Provide a vision statement for a tech company aiming to bridge the digital divide in rural areas.\", \"completion_target\": \"Empowering every individual, no matter their location, with equal and unhindered access to the digital realm, we strive to erase boundaries and create a universally connected world.\"},\n",
+ " \n",
+ " {\"text\": \"Suggest a strategy to improve employee wellness in remote working conditions.\", \"completion_target\": \"Promote regular digital detox hours, provide ergonomic home office equipment subsidies, organize virtual fitness sessions, and ensure frequent mental health check-ins and resources to cultivate a culture of well-being.\"},\n",
+ " \n",
+ " {\"text\": \"How can businesses benefit from embracing blockchain technology?\", \"completion_target\": \"By adopting blockchain, businesses can achieve enhanced data security, improved supply chain transparency, efficient and transparent transactions, reduced fraud, and can tap into new models like token economies.\"},\n",
+ " \n",
+ " {\"text\": \"Draft a mission statement for a startup focused on upcycling waste materials into high-end fashion.\", \"completion_target\": \"Transforming discarded materials into timeless fashion, we're not just designing clothing, but pioneering a movement. Our mission is to redefine luxury with sustainability at its heart.\"},\n",
+ " \n",
+ " {\"text\": \"Propose an innovative method for businesses to retain customers in an increasingly competitive digital market.\", \"completion_target\": \"Harness augmented reality (AR) to create immersive product experiences. Allow customers to virtually 'experience' products in their real environment before purchasing, bridging the gap between the digital and physical worlds.\"},\n",
+ " \n",
+ " {\"text\": \"How can companies ensure data privacy in the age of big data and AI?\", \"completion_target\": \"Companies can adopt zero-knowledge proofs, decentralized data storage, regularly audit AI algorithms for bias, maintain transparent data policies, and educate users about their data rights to ensure robust data privacy.\"}\n",
+ "])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "6ee2cebf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|███████████████| 10/10 [00:24<00:00, 2.48s/it]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " completion_target | \n",
+ " text_generation | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " Imagine a fusion of traditional retail with vi... | \n",
+ " In this futuristic retail scenario, customers ... | \n",
+ " The customer walks into a store that looks lik... | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " Draft a message to shareholders highlighting o... | \n",
+ " Dear valued shareholders, As we look ahead, ou... | \n",
+ " Dear Shareholders,\\n\\nI am pleased to announce... | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " Describe the office of the future in a post-pa... | \n",
+ " The office of the future is a hybrid space tha... | \n",
+ " The office of the future in a post-pandemic wo... | \n",
+ "\n",
+ " \n",
+ " 3 | \n",
+ " How can we integrate AI into our customer serv... | \n",
+ " Integrating AI into customer service can be ac... | \n",
+ " One way to integrate AI into customer service ... | \n",
+ "\n",
+ " \n",
+ " 4 | \n",
+ " Provide a vision statement for a tech company ... | \n",
+ " Empowering every individual, no matter their l... | \n",
+ " Our vision is to create a world where technolo... | \n",
+ "\n",
+ " \n",
+ " 5 | \n",
+ " Suggest a strategy to improve employee wellnes... | \n",
+ " Promote regular digital detox hours, provide e... | \n",
+ " One strategy to improve employee wellness in r... | \n",
+ "\n",
+ " \n",
+ " 6 | \n",
+ " How can businesses benefit from embracing bloc... | \n",
+ " By adopting blockchain, businesses can achieve... | \n",
+ " \\nBusinesses can benefit greatly from embracin... | \n",
+ "\n",
+ " \n",
+ " 7 | \n",
+ " Draft a mission statement for a startup focuse... | \n",
+ " Transforming discarded materials into timeless... | \n",
+ " \\nAt our startup, we are passionate about crea... | \n",
+ "\n",
+ " \n",
+ " 8 | \n",
+ " Propose an innovative method for businesses to... | \n",
+ " Harness augmented reality (AR) to create immer... | \n",
+ " One potential solution could be implementing a... | \n",
+ "\n",
+ " \n",
+ " 9 | \n",
+ " How can companies ensure data privacy in the a... | \n",
+ " Companies can adopt zero-knowledge proofs, dec... | \n",
+ " In order to ensure data privacy in the age of ... | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text \\\n",
+ "0 Imagine a fusion of traditional retail with vi... \n",
+ "1 Draft a message to shareholders highlighting o... \n",
+ "2 Describe the office of the future in a post-pa... \n",
+ "3 How can we integrate AI into our customer serv... \n",
+ "4 Provide a vision statement for a tech company ... \n",
+ "5 Suggest a strategy to improve employee wellnes... \n",
+ "6 How can businesses benefit from embracing bloc... \n",
+ "7 Draft a mission statement for a startup focuse... \n",
+ "8 Propose an innovative method for businesses to... \n",
+ "9 How can companies ensure data privacy in the a... \n",
+ "\n",
+ " completion_target \\\n",
+ "0 In this futuristic retail scenario, customers ... \n",
+ "1 Dear valued shareholders, As we look ahead, ou... \n",
+ "2 The office of the future is a hybrid space tha... \n",
+ "3 Integrating AI into customer service can be ac... \n",
+ "4 Empowering every individual, no matter their l... \n",
+ "5 Promote regular digital detox hours, provide e... \n",
+ "6 By adopting blockchain, businesses can achieve... \n",
+ "7 Transforming discarded materials into timeless... \n",
+ "8 Harness augmented reality (AR) to create immer... \n",
+ "9 Companies can adopt zero-knowledge proofs, dec... \n",
+ "\n",
+ " text_generation \n",
+ "0 The customer walks into a store that looks lik... \n",
+ "1 Dear Shareholders,\\n\\nI am pleased to announce... \n",
+ "2 The office of the future in a post-pandemic wo... \n",
+ "3 One way to integrate AI into customer service ... \n",
+ "4 Our vision is to create a world where technolo... \n",
+ "5 One strategy to improve employee wellness in r... \n",
+ "6 \\nBusinesses can benefit greatly from embracin... \n",
+ "7 \\nAt our startup, we are passionate about crea... \n",
+ "8 One potential solution could be implementing a... \n",
+ "9 In order to ensure data privacy in the age of ... "
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from adala.agents import Agent\n",
+ "from adala.skills.generation.base import TextGenerationSkill\n",
+ "\n",
+ "agent = Agent(\n",
+ " skills=TextGenerationSkill(\n",
+ " name='text_generation',\n",
+ " input_data_field='text'\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "run = agent.apply_skills(df)\n",
+ "run.predictions"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/translation_skill.ipynb b/examples/translation_skill.ipynb
new file mode 100644
index 00000000..d830f364
--- /dev/null
+++ b/examples/translation_skill.ipynb
@@ -0,0 +1,312 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "94ad15ac",
+ "metadata": {},
+ "source": [
+ "# Translation skill"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "a2f6d99b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " language | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " El sol brilla siempre | \n",
+ " Spanish | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " La vie est belle | \n",
+ " French | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " Der Wald ruft mich | \n",
+ " German | \n",
+ "\n",
+ " \n",
+ " 3 | \n",
+ " Amo la pizza napoletana | \n",
+ " Italian | \n",
+ "\n",
+ " \n",
+ " 4 | \n",
+ " 春天的花很美 | \n",
+ " Chinese | \n",
+ "\n",
+ " \n",
+ " 5 | \n",
+ " Звезды сверкают ночью | \n",
+ " Russian | \n",
+ "\n",
+ " \n",
+ " 6 | \n",
+ " 雨の後の虹 | \n",
+ " Japanese | \n",
+ "\n",
+ " \n",
+ " 7 | \n",
+ " 커피가 필요해 | \n",
+ " Korean | \n",
+ "\n",
+ " \n",
+ " 8 | \n",
+ " A música toca a alma | \n",
+ " Portuguese | \n",
+ "\n",
+ " \n",
+ " 9 | \n",
+ " सपने सच होते हैं | \n",
+ " Hindi | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text language\n",
+ "0 El sol brilla siempre Spanish\n",
+ "1 La vie est belle French\n",
+ "2 Der Wald ruft mich German\n",
+ "3 Amo la pizza napoletana Italian\n",
+ "4 春天的花很美 Chinese\n",
+ "5 Звезды сверкают ночью Russian\n",
+ "6 雨の後の虹 Japanese\n",
+ "7 커피가 필요해 Korean\n",
+ "8 A música toca a alma Portuguese\n",
+ "9 सपने सच होते हैं Hindi"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "df = pd.DataFrame([\n",
+ " {\"text\": \"El sol brilla siempre\", \"language\": \"Spanish\"},\n",
+ " {\"text\": \"La vie est belle\", \"language\": \"French\"}, \n",
+ " {\"text\": \"Der Wald ruft mich\", \"language\": \"German\"},\n",
+ " {\"text\": \"Amo la pizza napoletana\", \"language\": \"Italian\"},\n",
+ " {\"text\": \"春天的花很美\", \"language\": \"Chinese\"}, \n",
+ " {\"text\": \"Звезды сверкают ночью\", \"language\": \"Russian\"},\n",
+ " {\"text\": \"雨の後の虹\", \"language\": \"Japanese\"},\n",
+ " {\"text\": \"커피가 필요해\", \"language\": \"Korean\"},\n",
+ " {\"text\": \"A música toca a alma\", \"language\": \"Portuguese\"},\n",
+ " {\"text\": \"सपने सच होते हैं\", \"language\": \"Hindi\"}\n",
+ "])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "6ee2cebf",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|███████████████| 10/10 [00:08<00:00, 1.13it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from adala.agents import Agent\n",
+ "from adala.environments import BasicEnvironment\n",
+ "from adala.skills.generation.translation import TranslationSkill\n",
+ "from rich import print\n",
+ "\n",
+ "agent = Agent(skills=TranslationSkill(input_data_field='text', target_language='Swahili'))\n",
+ "\n",
+ "run = agent.apply_skills(df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "ee97ee22",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " text | \n",
+ " language | \n",
+ " translation | \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " El sol brilla siempre | \n",
+ " Spanish | \n",
+ " Jua huangaza daima | \n",
+ "\n",
+ " \n",
+ " 1 | \n",
+ " La vie est belle | \n",
+ " French | \n",
+ " Maisha ni mazuri | \n",
+ "\n",
+ " \n",
+ " 2 | \n",
+ " Der Wald ruft mich | \n",
+ " German | \n",
+ " Msitu unaniita | \n",
+ "\n",
+ " \n",
+ " 3 | \n",
+ " Amo la pizza napoletana | \n",
+ " Italian | \n",
+ " Napenda pizza ya Napoli | \n",
+ "\n",
+ " \n",
+ " 4 | \n",
+ " 春天的花很美 | \n",
+ " Chinese | \n",
+ " Maua ya msimu wa kuchipua ni mazuri sana | \n",
+ "\n",
+ " \n",
+ " 5 | \n",
+ " Звезды сверкают ночью | \n",
+ " Russian | \n",
+ " Nyota zinang'aa usiku | \n",
+ "\n",
+ " \n",
+ " 6 | \n",
+ " 雨の後の虹 | \n",
+ " Japanese | \n",
+ " Mvua baada ya upinde wa mvua | \n",
+ "\n",
+ " \n",
+ " 7 | \n",
+ " 커피가 필요해 | \n",
+ " Korean | \n",
+ " Ninahitaji kahawa | \n",
+ "\n",
+ " \n",
+ " 8 | \n",
+ " A música toca a alma | \n",
+ " Portuguese | \n",
+ " Muziki hucheza roho | \n",
+ "\n",
+ " \n",
+ " 9 | \n",
+ " सपने सच होते हैं | \n",
+ " Hindi | \n",
+ " Ndoto zinatimia | \n",
+ "\n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ " text language \\\n",
+ "0 El sol brilla siempre Spanish \n",
+ "1 La vie est belle French \n",
+ "2 Der Wald ruft mich German \n",
+ "3 Amo la pizza napoletana Italian \n",
+ "4 春天的花很美 Chinese \n",
+ "5 Звезды сверкают ночью Russian \n",
+ "6 雨の後の虹 Japanese \n",
+ "7 커피가 필요해 Korean \n",
+ "8 A música toca a alma Portuguese \n",
+ "9 सपने सच होते हैं Hindi \n",
+ "\n",
+ " translation \n",
+ "0 Jua huangaza daima \n",
+ "1 Maisha ni mazuri \n",
+ "2 Msitu unaniita \n",
+ "3 Napenda pizza ya Napoli \n",
+ "4 Maua ya msimu wa kuchipua ni mazuri sana \n",
+ "5 Nyota zinang'aa usiku \n",
+ "6 Mvua baada ya upinde wa mvua \n",
+ "7 Ninahitaji kahawa \n",
+ "8 Muziki hucheza roho \n",
+ "9 Ndoto zinatimia "
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "run.predictions"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/pdm.lock b/pdm.lock
new file mode 100644
index 00000000..795173d3
--- /dev/null
+++ b/pdm.lock
@@ -0,0 +1,1347 @@
+# This file is @generated by PDM.
+# It is not intended for manual editing.
+
+[metadata]
+groups = ["default"]
+cross_platform = true
+static_urls = false
+lock_version = "4.3"
+content_hash = "sha256:393586f004c3f00bb1aaa5d0c7f71d670c4491e579daaf0e2df40762dfbd660a"
+
+[[package]]
+name = "aiohttp"
+version = "3.8.6"
+requires_python = ">=3.6"
+summary = "Async http client/server framework (asyncio)"
+dependencies = [
+ "aiosignal>=1.1.2",
+ "async-timeout<5.0,>=4.0.0a3",
+ "attrs>=17.3.0",
+ "charset-normalizer<4.0,>=2.0",
+ "frozenlist>=1.1.1",
+ "multidict<7.0,>=4.5",
+ "yarl<2.0,>=1.0",
+]
+files = [
+ {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"},
+ {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d84166673694841d8953f0a8d0c90e1087739d24632fe86b1a08819168b4566"},
+ {file = "aiohttp-3.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:253bf92b744b3170eb4c4ca2fa58f9c4b87aeb1df42f71d4e78815e6e8b73c9e"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd194939b1f764d6bb05490987bfe104287bbf51b8d862261ccf66f48fb4096"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c5f938d199a6fdbdc10bbb9447496561c3a9a565b43be564648d81e1102ac22"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2817b2f66ca82ee699acd90e05c95e79bbf1dc986abb62b61ec8aaf851e81c93"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fa375b3d34e71ccccf172cab401cd94a72de7a8cc01847a7b3386204093bb47"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9de50a199b7710fa2904be5a4a9b51af587ab24c8e540a7243ab737b45844543"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e1d8cb0b56b3587c5c01de3bf2f600f186da7e7b5f7353d1bf26a8ddca57f965"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8e31e9db1bee8b4f407b77fd2507337a0a80665ad7b6c749d08df595d88f1cf5"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7bc88fc494b1f0311d67f29fee6fd636606f4697e8cc793a2d912ac5b19aa38d"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ec00c3305788e04bf6d29d42e504560e159ccaf0be30c09203b468a6c1ccd3b2"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad1407db8f2f49329729564f71685557157bfa42b48f4b93e53721a16eb813ed"},
+ {file = "aiohttp-3.8.6-cp310-cp310-win32.whl", hash = "sha256:ccc360e87341ad47c777f5723f68adbb52b37ab450c8bc3ca9ca1f3e849e5fe2"},
+ {file = "aiohttp-3.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:93c15c8e48e5e7b89d5cb4613479d144fda8344e2d886cf694fd36db4cc86865"},
+ {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e2f9cc8e5328f829f6e1fb74a0a3a939b14e67e80832975e01929e320386b34"},
+ {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6a00ffcc173e765e200ceefb06399ba09c06db97f401f920513a10c803604ca"},
+ {file = "aiohttp-3.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:41bdc2ba359032e36c0e9de5a3bd00d6fb7ea558a6ce6b70acedf0da86458321"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14cd52ccf40006c7a6cd34a0f8663734e5363fd981807173faf3a017e202fec9"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d5b785c792802e7b275c420d84f3397668e9d49ab1cb52bd916b3b3ffcf09ad"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1bed815f3dc3d915c5c1e556c397c8667826fbc1b935d95b0ad680787896a358"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96603a562b546632441926cd1293cfcb5b69f0b4159e6077f7c7dbdfb686af4d"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d76e8b13161a202d14c9584590c4df4d068c9567c99506497bdd67eaedf36403"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e3f1e3f1a1751bb62b4a1b7f4e435afcdade6c17a4fd9b9d43607cebd242924a"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:76b36b3124f0223903609944a3c8bf28a599b2cc0ce0be60b45211c8e9be97f8"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a2ece4af1f3c967a4390c284797ab595a9f1bc1130ef8b01828915a05a6ae684"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:16d330b3b9db87c3883e565340d292638a878236418b23cc8b9b11a054aaa887"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42c89579f82e49db436b69c938ab3e1559e5a4409eb8639eb4143989bc390f2f"},
+ {file = "aiohttp-3.8.6-cp311-cp311-win32.whl", hash = "sha256:efd2fcf7e7b9d7ab16e6b7d54205beded0a9c8566cb30f09c1abe42b4e22bdcb"},
+ {file = "aiohttp-3.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b2ab182fc28e7a81f6c70bfbd829045d9480063f5ab06f6e601a3eddbbd49a0"},
+ {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e2ee0ac5a1f5c7dd3197de309adfb99ac4617ff02b0603fd1e65b07dc772e4b"},
+ {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01770d8c04bd8db568abb636c1fdd4f7140b284b8b3e0b4584f070180c1e5c62"},
+ {file = "aiohttp-3.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c68330a59506254b556b99a91857428cab98b2f84061260a67865f7f52899f5"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89341b2c19fb5eac30c341133ae2cc3544d40d9b1892749cdd25892bbc6ac951"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71783b0b6455ac8f34b5ec99d83e686892c50498d5d00b8e56d47f41b38fbe04"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f628dbf3c91e12f4d6c8b3f092069567d8eb17814aebba3d7d60c149391aee3a"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04691bc6601ef47c88f0255043df6f570ada1a9ebef99c34bd0b72866c217ae"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee912f7e78287516df155f69da575a0ba33b02dd7c1d6614dbc9463f43066e3"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9c19b26acdd08dd239e0d3669a3dddafd600902e37881f13fbd8a53943079dbc"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99c5ac4ad492b4a19fc132306cd57075c28446ec2ed970973bbf036bcda1bcc6"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f0f03211fd14a6a0aed2997d4b1c013d49fb7b50eeb9ffdf5e51f23cfe2c77fa"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8d399dade330c53b4106160f75f55407e9ae7505263ea86f2ccca6bfcbdb4921"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec4fd86658c6a8964d75426517dc01cbf840bbf32d055ce64a9e63a40fd7b771"},
+ {file = "aiohttp-3.8.6-cp38-cp38-win32.whl", hash = "sha256:33164093be11fcef3ce2571a0dccd9041c9a93fa3bde86569d7b03120d276c6f"},
+ {file = "aiohttp-3.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:bdf70bfe5a1414ba9afb9d49f0c912dc524cf60141102f3a11143ba3d291870f"},
+ {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d52d5dc7c6682b720280f9d9db41d36ebe4791622c842e258c9206232251ab2b"},
+ {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ac39027011414dbd3d87f7edb31680e1f430834c8cef029f11c66dad0670aa5"},
+ {file = "aiohttp-3.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f5c7ce535a1d2429a634310e308fb7d718905487257060e5d4598e29dc17f0b"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b30e963f9e0d52c28f284d554a9469af073030030cef8693106d918b2ca92f54"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:918810ef188f84152af6b938254911055a72e0f935b5fbc4c1a4ed0b0584aed1"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:002f23e6ea8d3dd8d149e569fd580c999232b5fbc601c48d55398fbc2e582e8c"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fcf3eabd3fd1a5e6092d1242295fa37d0354b2eb2077e6eb670accad78e40e1"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:255ba9d6d5ff1a382bb9a578cd563605aa69bec845680e21c44afc2670607a95"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d67f8baed00870aa390ea2590798766256f31dc5ed3ecc737debb6e97e2ede78"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:86f20cee0f0a317c76573b627b954c412ea766d6ada1a9fcf1b805763ae7feeb"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:39a312d0e991690ccc1a61f1e9e42daa519dcc34ad03eb6f826d94c1190190dd"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e827d48cf802de06d9c935088c2924e3c7e7533377d66b6f31ed175c1620e05e"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd111d7fc5591ddf377a408ed9067045259ff2770f37e2d94e6478d0f3fc0c17"},
+ {file = "aiohttp-3.8.6-cp39-cp39-win32.whl", hash = "sha256:caf486ac1e689dda3502567eb89ffe02876546599bbf915ec94b1fa424eeffd4"},
+ {file = "aiohttp-3.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3f0e27e5b733803333bb2371249f41cf42bae8884863e8e8965ec69bebe53132"},
+ {file = "aiohttp-3.8.6.tar.gz", hash = "sha256:b0cf2a4501bff9330a8a5248b4ce951851e415bdcce9dc158e76cfd55e15085c"},
+]
+
+[[package]]
+name = "aiosignal"
+version = "1.3.1"
+requires_python = ">=3.7"
+summary = "aiosignal: a list of registered asynchronous callbacks"
+dependencies = [
+ "frozenlist>=1.1.0",
+]
+files = [
+ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
+ {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
+]
+
+[[package]]
+name = "annotated-types"
+version = "0.6.0"
+requires_python = ">=3.8"
+summary = "Reusable constraint types to use with typing.Annotated"
+dependencies = [
+ "typing-extensions>=4.0.0; python_version < \"3.9\"",
+]
+files = [
+ {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"},
+ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"},
+]
+
+[[package]]
+name = "async-timeout"
+version = "4.0.3"
+requires_python = ">=3.7"
+summary = "Timeout context manager for asyncio programs"
+files = [
+ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
+ {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
+]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+requires_python = ">=3.7"
+summary = "Classes Without Boilerplate"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[[package]]
+name = "cachetools"
+version = "5.3.2"
+requires_python = ">=3.7"
+summary = "Extensible memoizing collections and decorators"
+files = [
+ {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"},
+ {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"},
+]
+
+[[package]]
+name = "certifi"
+version = "2023.7.22"
+requires_python = ">=3.6"
+summary = "Python package for providing Mozilla's CA Bundle."
+files = [
+ {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+ {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+]
+
+[[package]]
+name = "cffi"
+version = "1.16.0"
+requires_python = ">=3.8"
+summary = "Foreign Function Interface for Python calling C code."
+dependencies = [
+ "pycparser",
+]
+files = [
+ {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"},
+ {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"},
+ {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"},
+ {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"},
+ {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"},
+ {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"},
+ {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"},
+ {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"},
+ {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"},
+ {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"},
+ {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"},
+ {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"},
+ {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"},
+ {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"},
+ {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"},
+ {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"},
+ {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"},
+ {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"},
+ {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"},
+ {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"},
+ {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"},
+ {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"},
+ {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"},
+ {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"},
+ {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"},
+ {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"},
+ {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.1"
+requires_python = ">=3.7.0"
+summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+files = [
+ {file = "charset-normalizer-3.3.1.tar.gz", hash = "sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-win32.whl", hash = "sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-win32.whl", hash = "sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-win32.whl", hash = "sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-win32.whl", hash = "sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-win32.whl", hash = "sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727"},
+ {file = "charset_normalizer-3.3.1-py3-none-any.whl", hash = "sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.7"
+requires_python = ">=3.7"
+summary = "Composable command line interface toolkit"
+dependencies = [
+ "colorama; platform_system == \"Windows\"",
+]
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+summary = "Cross-platform colored terminal text."
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "cryptography"
+version = "41.0.5"
+requires_python = ">=3.7"
+summary = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
+dependencies = [
+ "cffi>=1.12",
+]
+files = [
+ {file = "cryptography-41.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797"},
+ {file = "cryptography-41.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5"},
+ {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147"},
+ {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696"},
+ {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da"},
+ {file = "cryptography-41.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20"},
+ {file = "cryptography-41.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548"},
+ {file = "cryptography-41.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d"},
+ {file = "cryptography-41.0.5-cp37-abi3-win32.whl", hash = "sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936"},
+ {file = "cryptography-41.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81"},
+ {file = "cryptography-41.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1"},
+ {file = "cryptography-41.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72"},
+ {file = "cryptography-41.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88"},
+ {file = "cryptography-41.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf"},
+ {file = "cryptography-41.0.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e"},
+ {file = "cryptography-41.0.5-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8"},
+ {file = "cryptography-41.0.5-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179"},
+ {file = "cryptography-41.0.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d"},
+ {file = "cryptography-41.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1"},
+ {file = "cryptography-41.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86"},
+ {file = "cryptography-41.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723"},
+ {file = "cryptography-41.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84"},
+ {file = "cryptography-41.0.5.tar.gz", hash = "sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7"},
+]
+
+[[package]]
+name = "diskcache"
+version = "5.6.3"
+requires_python = ">=3"
+summary = "Disk Cache -- Disk and file backed persistent cache."
+files = [
+ {file = "diskcache-5.6.3-py3-none-any.whl", hash = "sha256:5e31b2d5fbad117cc363ebaf6b689474db18a1f6438bc82358b024abd4c2ca19"},
+ {file = "diskcache-5.6.3.tar.gz", hash = "sha256:2c3a3fa2743d8535d832ec61c2054a1641f41775aa7c556758a109941e33e4fc"},
+]
+
+[[package]]
+name = "frozenlist"
+version = "1.4.0"
+requires_python = ">=3.8"
+summary = "A list-like structure which implements collections.abc.MutableSequence"
+files = [
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"},
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"},
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"},
+ {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"},
+ {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"},
+ {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"},
+ {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"},
+ {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"},
+ {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"},
+ {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"},
+ {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"},
+]
+
+[[package]]
+name = "gptcache"
+version = "0.1.42"
+requires_python = ">=3.8.1"
+summary = "GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, similar to how Redis works for traditional applications."
+dependencies = [
+ "cachetools",
+ "numpy",
+ "requests",
+]
+files = [
+ {file = "gptcache-0.1.42-py3-none-any.whl", hash = "sha256:8da93cd9fdc3a1c09aae25b688823b4a5bc28dcfa4522e33617f3f7a9e5b8bb0"},
+ {file = "gptcache-0.1.42.tar.gz", hash = "sha256:17339c41d992bd47c623c716be3bd915dba2687a0fa52aa4ab4ed9cc7cc2b256"},
+]
+
+[[package]]
+name = "guidance"
+version = "0.0.64"
+summary = "A guidance language for controlling large language models."
+dependencies = [
+ "aiohttp",
+ "diskcache",
+ "gptcache",
+ "msal",
+ "nest-asyncio",
+ "numpy",
+ "openai>=0.27.8",
+ "platformdirs",
+ "pygtrie",
+ "pyparsing>=3.0.0",
+ "requests",
+ "tiktoken>=0.3",
+]
+files = [
+ {file = "guidance-0.0.64-py3-none-any.whl", hash = "sha256:b15b3bc667bb5b6e9b574781cab7c1ce7fa0a6e705651595bbd8630d124c045a"},
+ {file = "guidance-0.0.64.tar.gz", hash = "sha256:baaee2c791fe853c920b5964661bb63155feb58f84c25e45f83c47f63c4e58dd"},
+]
+
+[[package]]
+name = "hiredis"
+version = "2.2.3"
+requires_python = ">=3.7"
+summary = "Python wrapper for hiredis"
+files = [
+ {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:9a1a80a8fa767f2fdc3870316a54b84fe9fc09fa6ab6a2686783de6a228a4604"},
+ {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3f006c28c885deb99b670a5a66f367a175ab8955b0374029bad7111f5357dcd4"},
+ {file = "hiredis-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffaf841546905d90ff189de7397aa56413b1ce5e54547f17a98f0ebf3a3b0a3b"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cadb0ac7ba3babfd804e425946bec9717b320564a1390f163a54af9365a720a"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33bc4721632ef9708fa44e5df0066053fccc8e65410a2c48573192517a533b48"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:227c5b4bcb60f89008c275d596e4a7b6625a6b3c827b8a66ae582eace7051f71"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61995eb826009d99ed8590747bc0da683a5f4fbb4faa8788166bf3810845cd5c"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f969edc851efe23010e0f53a64269f2629a9364135e9ec81c842e8b2277d0c1"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27e560eefb57914d742a837f1da98d3b29cb22eff013c8023b7cf52ae6e051d"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3759f4789ae1913b7df278dfc9e8749205b7a106f888cd2903d19461e24a7697"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c6cb613148422c523945cdb8b6bed617856f2602fd8750e33773ede2616e55d5"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:1d274d5c511dfc03f83f997d3238eaa9b6ee3f982640979f509373cced891e98"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b7fe075e91b9d9cff40eba4fb6a8eff74964d3979a39be9a9ef58b1b4cb3604"},
+ {file = "hiredis-2.2.3-cp310-cp310-win32.whl", hash = "sha256:77924b0d32fd1f493d3df15d9609ddf9d94c31a364022a6bf6b525ce9da75bea"},
+ {file = "hiredis-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:dcb0569dd5bfe6004658cd0f229efa699a3169dcb4f77bd72e188adda302063d"},
+ {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:d115790f18daa99b5c11a506e48923b630ef712e9e4b40482af942c3d40638b8"},
+ {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c3b8be557e08b234774925622e196f0ee36fe4eab66cd19df934d3efd8f3743"},
+ {file = "hiredis-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f5446068197b35a11ccc697720c41879c8657e2e761aaa8311783aac84cef20"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa17a3b22b3726d54d7af20394f65d4a1735a842a4e0f557dc67a90f6965c4bc"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7df645b6b7800e8b748c217fbd6a4ca8361bcb9a1ae6206cc02377833ec8a1aa"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2fb9300959a0048138791f3d68359d61a788574ec9556bddf1fec07f2dbc5320"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d7e459fe7313925f395148d36d9b7f4f8dac65be06e45d7af356b187cef65fc"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8eceffca3941775b646cd585cd19b275d382de43cc3327d22f7c75d7b003d481"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b17baf702c6e5b4bb66e1281a3efbb1d749c9d06cdb92b665ad81e03118f78fc"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e43e2b5acaad09cf48c032f7e4926392bb3a3f01854416cf6d82ebff94d5467"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a7205497d7276a81fe92951a29616ef96562ed2f91a02066f72b6f93cb34b40e"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:126623b03c31cb6ac3e0d138feb6fcc36dd43dd34fc7da7b7a0c38b5d75bc896"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:071c5814b850574036506a8118034f97c3cbf2fe9947ff45a27b07a48da56240"},
+ {file = "hiredis-2.2.3-cp311-cp311-win32.whl", hash = "sha256:d1be9e30e675f5bc1cb534633324578f6f0944a1bcffe53242cf632f554f83b6"},
+ {file = "hiredis-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9a7c987e161e3c58f992c63b7e26fea7fe0777f3b975799d23d65bbb8cb5899"},
+ {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:5a4bcef114fc071d5f52c386c47f35aae0a5b43673197b9288a15b584da8fa3a"},
+ {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:232d0a70519865741ba56e1dfefd160a580ae78c30a1517bad47b3cf95a3bc7d"},
+ {file = "hiredis-2.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9076ce8429785c85f824650735791738de7143f61f43ae9ed83e163c0ca0fa44"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec58fb7c2062f835595c12f0f02dcda76d0eb0831423cc191d1e18c9276648de"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f2b34a6444b8f9c1e9f84bd2c639388e5d14f128afd14a869dfb3d9af893aa2"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:818dfd310aa1020a13cd08ee48e116dd8c3bb2e23b8161f8ac4df587dd5093d7"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d9ea6c8d4cbdeee2e0d43379ce2881e4af0454b00570677c59f33f2531cd38"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1eadbcd3de55ac42310ff82550d3302cb4efcd4e17d76646a17b6e7004bb42b"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:477c34c4489666dc73cb5e89dafe2617c3e13da1298917f73d55aac4696bd793"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:14824e457e4f5cda685c3345d125da13949bcf3bb1c88eb5d248c8d2c3dee08f"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9cd32326dfa6ce87edf754153b0105aca64486bebe93b9600ccff74fa0b224df"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:51341e70b467004dcbec3a6ce8c478d2d6241e0f6b01e4c56764afd5022e1e9d"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2443659c76b226267e2a04dbbb21bc2a3f91aa53bdc0c22964632753ae43a247"},
+ {file = "hiredis-2.2.3-cp38-cp38-win32.whl", hash = "sha256:4e3e3e31423f888d396b1fc1f936936e52af868ac1ec17dd15e3eeba9dd4de24"},
+ {file = "hiredis-2.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:20f509e3a1a20d6e5f5794fc37ceb21f70f409101fcfe7a8bde783894d51b369"},
+ {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:d20891e3f33803b26d54c77fd5745878497091e33f4bbbdd454cf6e71aee8890"},
+ {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:50171f985e17970f87d5a29e16603d1e5b03bdbf5c2691a37e6c912942a6b657"},
+ {file = "hiredis-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9944a2cac25ffe049a7e89f306e11b900640837d1ef38d9be0eaa4a4e2b73a52"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a5c8019ff94988d56eb49b15de76fe83f6b42536d76edeb6565dbf7fe14b973"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a286ded34eb16501002e3713b3130c987366eee2ba0d58c33c72f27778e31676"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e974ad15eb32b1f537730dea70b93a4c3db7b026de3ad2b59da49c6f7454d"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08415ea74c1c29b9d6a4ca3dd0e810dc1af343c1d1d442e15ba133b11ab5be6a"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e17d04ea58ab8cf3f2dc52e875db16077c6357846006780086fff3189fb199d"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6ccdcb635dae85b006592f78e32d97f4bc7541cb27829d505f9c7fefcef48298"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69536b821dd1bc78058a6e7541743f8d82bf2d981b91280b14c4daa6cdc7faba"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:3753df5f873d473f055e1f8837bfad0bd3b277c86f3c9bf058c58f14204cd901"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6f88cafe46612b6fa68e6dea49e25bebf160598bba00101caa51cc8c1f18d597"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33ee3ea5cad3a8cb339352cd230b411eb437a2e75d7736c4899acab32056ccdb"},
+ {file = "hiredis-2.2.3-cp39-cp39-win32.whl", hash = "sha256:b4f3d06dc16671b88a13ae85d8ca92534c0b637d59e49f0558d040a691246422"},
+ {file = "hiredis-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4f674e309cd055ee7a48304ceb8cf43265d859faf4d7d01d270ce45e976ae9d3"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8f280ab4e043b089777b43b4227bdc2035f88da5072ab36588e0ccf77d45d058"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c2a551f3b8a26f7940d6ee10b837810201754b8d7e6f6b1391655370882c5a"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c4e3c258eafaab21b174b17270a0cc093718d61cdbde8c03f85ec4bf835343"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc36a9dded458d4e37492fe3e619c6c83caae794d26ad925adbce61d592f8428"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:4ed68a3b1ccb4313d2a42546fd7e7439ad4745918a48b6c9bcaa61e1e3e42634"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3bf4b5bae472630c229518e4a814b1b68f10a3d9b00aeaec45f1a330f03a0251"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33a94d264e6e12a79d9bb8af333b01dc286b9f39c99072ab5fef94ce1f018e17"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fa6811a618653164f918b891a0fa07052bd71a799defa5c44d167cac5557b26"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af33f370be90b48bbaf0dab32decbdcc522b1fa95d109020a963282086518a8e"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b9953d87418ac228f508d93898ab572775e4d3b0eeb886a1a7734553bcdaf291"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5e7bb4dd524f50b71c20ef5a12bd61da9b463f8894b18a06130942fe31509881"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89a258424158eb8b3ed9f65548d68998da334ef155d09488c5637723eb1cd697"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f4a65276f6ecdebe75f2a53f578fbc40e8d2860658420d5e0611c56bbf5054c"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:334f2738700b20faa04a0d813366fb16ed17287430a6b50584161d5ad31ca6d7"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d194decd9608f11c777946f596f31d5aacad13972a0a87829ae1e6f2d26c1885"},
+ {file = "hiredis-2.2.3.tar.gz", hash = "sha256:e75163773a309e56a9b58165cf5a50e0f84b755f6ff863b2c01a38918fe92daa"},
+]
+
+[[package]]
+name = "idna"
+version = "3.4"
+requires_python = ">=3.5"
+summary = "Internationalized Domain Names in Applications (IDNA)"
+files = [
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+]
+
+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+requires_python = ">=3.8"
+summary = "Python port of markdown-it. Markdown parsing, done right!"
+dependencies = [
+ "mdurl~=0.1",
+]
+files = [
+ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
+ {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+requires_python = ">=3.7"
+summary = "Markdown URL utilities"
+files = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+
+[[package]]
+name = "more-itertools"
+version = "9.1.0"
+requires_python = ">=3.7"
+summary = "More routines for operating on iterables, beyond itertools"
+files = [
+ {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"},
+ {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"},
+]
+
+[[package]]
+name = "msal"
+version = "1.24.1"
+requires_python = ">=2.7"
+summary = "The Microsoft Authentication Library (MSAL) for Python library"
+dependencies = [
+ "PyJWT[crypto]<3,>=1.0.0",
+ "cryptography<44,>=0.6",
+ "requests<3,>=2.0.0",
+]
+files = [
+ {file = "msal-1.24.1-py2.py3-none-any.whl", hash = "sha256:ce4320688f95c301ee74a4d0e9dbcfe029a63663a8cc61756f40d0d0d36574ad"},
+ {file = "msal-1.24.1.tar.gz", hash = "sha256:aa0972884b3c6fdec53d9a0bd15c12e5bd7b71ac1b66d746f54d128709f3f8f8"},
+]
+
+[[package]]
+name = "multidict"
+version = "6.0.4"
+requires_python = ">=3.7"
+summary = "multidict implementation"
+files = [
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"},
+ {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"},
+ {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"},
+ {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"},
+ {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"},
+ {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"},
+ {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"},
+ {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"},
+ {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"},
+ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"},
+]
+
+[[package]]
+name = "nest-asyncio"
+version = "1.5.8"
+requires_python = ">=3.5"
+summary = "Patch asyncio to allow nested event loops"
+files = [
+ {file = "nest_asyncio-1.5.8-py3-none-any.whl", hash = "sha256:accda7a339a70599cb08f9dd09a67e0c2ef8d8d6f4c07f96ab203f2ae254e48d"},
+ {file = "nest_asyncio-1.5.8.tar.gz", hash = "sha256:25aa2ca0d2a5b5531956b9e273b45cf664cae2b145101d73b86b199978d48fdb"},
+]
+
+[[package]]
+name = "numpy"
+version = "1.24.4"
+requires_python = ">=3.8"
+summary = "Fundamental package for array computing in Python"
+files = [
+ {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"},
+ {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"},
+ {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"},
+ {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"},
+ {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"},
+ {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"},
+ {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"},
+ {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"},
+ {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"},
+ {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"},
+ {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"},
+]
+
+[[package]]
+name = "openai"
+version = "0.28.1"
+requires_python = ">=3.7.1"
+summary = "Python client library for the OpenAI API"
+dependencies = [
+ "aiohttp",
+ "requests>=2.20",
+ "tqdm",
+]
+files = [
+ {file = "openai-0.28.1-py3-none-any.whl", hash = "sha256:d18690f9e3d31eedb66b57b88c2165d760b24ea0a01f150dd3f068155088ce68"},
+ {file = "openai-0.28.1.tar.gz", hash = "sha256:4be1dad329a65b4ce1a660fe6d5431b438f429b5855c883435f0f7fcb6d2dcc8"},
+]
+
+[[package]]
+name = "pandas"
+version = "2.0.3"
+requires_python = ">=3.8"
+summary = "Powerful data structures for data analysis, time series, and statistics"
+dependencies = [
+ "numpy>=1.20.3; python_version < \"3.10\"",
+ "numpy>=1.21.0; python_version >= \"3.10\"",
+ "numpy>=1.23.2; python_version >= \"3.11\"",
+ "python-dateutil>=2.8.2",
+ "pytz>=2020.1",
+ "tzdata>=2022.1",
+]
+files = [
+ {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"},
+ {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"},
+ {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"},
+ {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"},
+ {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"},
+ {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"},
+ {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"},
+ {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"},
+ {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"},
+ {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"},
+ {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"},
+ {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"},
+ {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"},
+ {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"},
+ {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"},
+ {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"},
+ {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"},
+ {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"},
+ {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"},
+ {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"},
+ {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"},
+ {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"},
+ {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"},
+ {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"},
+ {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"},
+]
+
+[[package]]
+name = "platformdirs"
+version = "3.11.0"
+requires_python = ">=3.7"
+summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+files = [
+ {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"},
+ {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"},
+]
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+summary = "C parser in Python"
+files = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pydantic"
+version = "2.0.3"
+requires_python = ">=3.7"
+summary = "Data validation using Python type hints"
+dependencies = [
+ "annotated-types>=0.4.0",
+ "pydantic-core==2.3.0",
+ "typing-extensions>=4.6.1",
+]
+files = [
+ {file = "pydantic-2.0.3-py3-none-any.whl", hash = "sha256:614eb3321eb600c81899a88fa9858b008e3c79e0d4f1b49ab1f516b4b0c27cfb"},
+ {file = "pydantic-2.0.3.tar.gz", hash = "sha256:94f13e0dcf139a5125e88283fc999788d894e14ed90cf478bcc2ee50bd4fc630"},
+]
+
+[[package]]
+name = "pydantic-core"
+version = "2.3.0"
+requires_python = ">=3.7"
+summary = ""
+dependencies = [
+ "typing-extensions!=4.7.0,>=4.6.0",
+]
+files = [
+ {file = "pydantic_core-2.3.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:4542c98b8364b976593703a2dda97377433b102f380b61bc3a2cbc2fbdae1d1f"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9342de50824b40f55d2600f66c6f9a91a3a24851eca39145a749a3dc804ee599"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:539432f911686cb80284c30b33eaf9f4fd9a11e1111fe0dc98fdbdce69b49821"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38a0e7ee65c8999394d92d9c724434cb629279d19844f2b69d9bbc46dc8b8b61"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-manylinux_2_24_armv7l.whl", hash = "sha256:e3ed6834cc005798187a56c248a2240207cb8ffdda1c89e9afda4c3d526c2ea0"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:e72ac299a6bf732a60852d052acf3999d234686755a02ba111e85e7ebf8155b1"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-manylinux_2_24_s390x.whl", hash = "sha256:616b3451b05ca63b8f433c627f68046b39543faeaa4e50d8c6699a2a1e4b85a5"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:adcb9c8848e15c613e483e0b99767ae325af27fe0dbd866df01fe5849d06e6e1"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:464bf799b422be662e5e562e62beeffc9eaa907d381a9d63a2556615bbda286d"},
+ {file = "pydantic_core-2.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4638ebc17de08c2f3acba557efeb6f195c88b7299d8c55c0bb4e20638bbd4d03"},
+ {file = "pydantic_core-2.3.0-cp310-none-win32.whl", hash = "sha256:9ff322c7e1030543d35d83bb521b69114d3d150750528d7757544f639def9ad6"},
+ {file = "pydantic_core-2.3.0-cp310-none-win_amd64.whl", hash = "sha256:4824eb018f0a4680b1e434697a9bf3f41c7799b80076d06530cbbd212e040ccc"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:0aa429578e23885b3984c49d687cd05ab06f0b908ea1711a8bf7e503b7f97160"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:20d710c1f79af930b8891bcebd84096798e4387ab64023ef41521d58f21277d3"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:309f45d4d7481d6f09cb9e35c72caa0e50add4a30bb08c04c5fe5956a0158633"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bcfb7be905aa849bd882262e1df3f75b564e2f708b4b4c7ad2d3deaf5410562"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-manylinux_2_24_armv7l.whl", hash = "sha256:85cd9c0af34e371390e3cb2f3a470b0b40cc07568c1e966c638c49062be6352d"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-manylinux_2_24_ppc64le.whl", hash = "sha256:37c5028cebdf731298724070838fb3a71ef1fbd201d193d311ac2cbdbca25a23"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-manylinux_2_24_s390x.whl", hash = "sha256:e4208f23f12d0ad206a07a489ef4cb15722c10b62774c4460ee4123250be938e"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c24465dd11b65c8510f251b095fc788c7c91481c81840112fe3f76c30793a455"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3cd7ee8bbfab277ab56e272221886fd33a1b5943fbf45ae9195aa6a48715a8a0"},
+ {file = "pydantic_core-2.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0fc7e0b056b66cc536e97ef60f48b3b289f6b3b62ac225afd4b22a42434617bf"},
+ {file = "pydantic_core-2.3.0-cp311-none-win32.whl", hash = "sha256:4788135db4bd83a5edc3522b11544b013be7d25b74b155e08dd3b20cd6663bbb"},
+ {file = "pydantic_core-2.3.0-cp311-none-win_amd64.whl", hash = "sha256:f93c867e5e85584a28c6a6feb6f2086d717266eb5d1210d096dd717b7f4dec04"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:73f62bb7fd862d9bcd886e10612bade6fe042eda8b47e8c129892bcfb7b45e84"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d889d498fce64bfcd8adf1a78579a7f626f825cbeb2956a24a29b35f9a1df32"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d55e38a89ec2ae17b2fa7ffeda6b70f63afab1888bd0d57aaa7b7879760acb4"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1aefebb506bc1fe355d91d25f12bcdea7f4d7c2d9f0f6716dd025543777c99a5"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-manylinux_2_24_armv7l.whl", hash = "sha256:6441a29f42585f085db0c04cd0557d4cbbb46fa68a0972409b1cfe9f430280c1"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-manylinux_2_24_ppc64le.whl", hash = "sha256:47e8f034be31390a8f525431eb5e803a78ce7e2e11b32abf5361a972e14e6b61"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-manylinux_2_24_s390x.whl", hash = "sha256:ad814864aba263be9c83ada44a95f72d10caabbf91589321f95c29c902bdcff0"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9eff3837d447fccf2ac38c259b14ab9cbde700df355a45a1f3ff244d5e78f8b6"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:534f3f63c000f08050c6f7f4378bf2b52d7ba9214e9d35e3f60f7ad24a4d6425"},
+ {file = "pydantic_core-2.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ef6a222d54f742c24f6b143aab088702db3a827b224e75b9dd28b38597c595fe"},
+ {file = "pydantic_core-2.3.0-cp312-none-win32.whl", hash = "sha256:4e26944e64ecc1d7b19db954c0f7b471f3b141ec8e1a9f57cfe27671525cd248"},
+ {file = "pydantic_core-2.3.0-cp312-none-win_amd64.whl", hash = "sha256:019c5c41941438570dfc7d3f0ae389b2425add1775a357ce1e83ed1434f943d6"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:0b3d781c71b8bfb621ef23b9c874933e2cd33237c1a65cc20eeb37437f8e7e18"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad46027dbd5c1db87dc0b49becbe23093b143a20302028d387dae37ee5ef95f5"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39aa09ed7ce2a648c904f79032d16dda29e6913112af8465a7bf710eef23c7ca"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05b4bf8c58409586a7a04c858a86ab10f28c6c1a7c33da65e0326c59d5b0ab16"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-manylinux_2_24_armv7l.whl", hash = "sha256:ba2b807d2b62c446120906b8580cddae1d76d3de4efbb95ccc87f5e35c75b4b2"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:ea955e4ed21f4bbb9b83fea09fc6af0bed82e69ecf6b35ec89237a0a49633033"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-manylinux_2_24_s390x.whl", hash = "sha256:06884c07956526ac9ebfef40fe21a11605569b8fc0e2054a375fb39c978bf48f"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f868e731a18b403b88aa434d960489ceeed0ddeb44ebc02389540731a67705e0"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cb08fab0fc1db15c277b72e33ac74ad9c0c789413da8984a3eacb22a94b42ef4"},
+ {file = "pydantic_core-2.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6ca34c29fbd6592de5fd39e80c1993634d704c4e7e14ba54c87b2c7c53da68fe"},
+ {file = "pydantic_core-2.3.0-cp38-none-win32.whl", hash = "sha256:cd782807d35c8a41aaa7d30b5107784420eefd9fdc1c760d86007d43ae00b15d"},
+ {file = "pydantic_core-2.3.0-cp38-none-win_amd64.whl", hash = "sha256:01f56d5ee70b1d39c0fd08372cc5142274070ab7181d17c86035f130eebc05b8"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:78b1ac0151271ce62bc2b33755f1043eda6a310373143a2f27e2bcd3d5fc8633"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:64bfd2c35a2c350f73ac52dc134d8775f93359c4c969280a6fe5301b5b6e7431"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:937c0fe9538f1212b62df6a68f8d78df3572fe3682d9a0dd8851eac8a4e46063"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d965c7c4b40d1cedec9188782e98bd576f9a04868835604200c3a6e817b824f"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-manylinux_2_24_armv7l.whl", hash = "sha256:ad442b8585ed4a3c2d22e4bf7b465d9b7d281e055b09719a8aeb5b576422dc9b"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:4bf20c9722821fce766e685718e739deeccc60d6bc7be5029281db41f999ee0c"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-manylinux_2_24_s390x.whl", hash = "sha256:f3dd5333049b5b3faa739e0f40b77cc8b7a1aded2f2da0e28794c81586d7b08a"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dc5f516b24d24bc9e8dd9305460899f38302b3c4f9752663b396ef9848557bf"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:055f7ea6b1fbb37880d66d70eefd22dd319b09c79d2cb99b1dbfeb34b653b0b2"},
+ {file = "pydantic_core-2.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:af693a89db6d6ac97dd84dd7769b3f2bd9007b578127d0e7dda03053f4d3b34b"},
+ {file = "pydantic_core-2.3.0-cp39-none-win32.whl", hash = "sha256:f60e31e3e15e8c294bf70c60f8ae4d0c3caf3af8f26466e9aa8ea4c01302749b"},
+ {file = "pydantic_core-2.3.0-cp39-none-win_amd64.whl", hash = "sha256:2b79f3681481f4424d7845cc7a261d5a4baa810d656b631fa844dc9967b36a7b"},
+ {file = "pydantic_core-2.3.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:a666134b41712e30a71afaa26deeb4da374179f769fa49784cdf0e7698880fab"},
+ {file = "pydantic_core-2.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c119e9227487ad3d7c3c737d896afe548a6be554091f9745da1f4b489c40561"},
+ {file = "pydantic_core-2.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73929a2fb600a2333fce2efd92596cff5e6bf8946e20e93c067b220760064862"},
+ {file = "pydantic_core-2.3.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:41bbc2678a5b6a19371b2cb51f30ccea71f0c14b26477d2d884fed761cea42c7"},
+ {file = "pydantic_core-2.3.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dcbff997f47d45bf028bda4c3036bb3101e89a3df271281d392b6175f71c71d1"},
+ {file = "pydantic_core-2.3.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:afa8808159169368b66e4fbeafac6c6fd8f26246dc4d0dcc2caf94bd9cf1b828"},
+ {file = "pydantic_core-2.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:12be3b5f54f8111ca38e6b7277f26c23ba5cb3344fae06f879a0a93dfc8b479e"},
+ {file = "pydantic_core-2.3.0-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ed5babdcd3d052ba5cf8832561f18df20778c7ccf12587b2d82f7bf3bf259a0e"},
+ {file = "pydantic_core-2.3.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d642e5c029e2acfacf6aa0a7a3e822086b3b777c70d364742561f9ca64c1ffc"},
+ {file = "pydantic_core-2.3.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ba3073eb38a1294e8c7902989fb80a7a147a69db2396818722bd078476586a0"},
+ {file = "pydantic_core-2.3.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5146a6749b1905e04e62e0ad4622f079e5582f8b3abef5fb64516c623127908"},
+ {file = "pydantic_core-2.3.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:deeb64335f489c3c11949cbd1d1668b3f1fb2d1c6a5bf40e126ef7bf95f9fa40"},
+ {file = "pydantic_core-2.3.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:31acc37288b8e69e4849f618c3d5cf13b58077c1a1ff9ade0b3065ba974cd385"},
+ {file = "pydantic_core-2.3.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e09d9f6d722de9d4c1c5f122ea9bc6b25a05f975457805af4dcab7b0128aacbf"},
+ {file = "pydantic_core-2.3.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ba6a8cf089222a171b8f84e6ec2d10f7a9d14f26be3a347b14775a8741810676"},
+ {file = "pydantic_core-2.3.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef1fd1b24e9bcddcb168437686677104e205c8e25b066e73ffdf331d3bb8792b"},
+ {file = "pydantic_core-2.3.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eda1a89c4526826c0a87d33596a4cd15b8f58e9250f503e39af1699ba9c878e8"},
+ {file = "pydantic_core-2.3.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3e9a18401a28db4358da2e191508702dbf065f2664c710708cdf9552b9fa50c"},
+ {file = "pydantic_core-2.3.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a439fd0d45d51245bbde799726adda5bd18aed3fa2b01ab2e6a64d6d13776fa3"},
+ {file = "pydantic_core-2.3.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:bf6a1d2c920cc9528e884850a4b2ee7629e3d362d5c44c66526d4097bbb07a1a"},
+ {file = "pydantic_core-2.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e33fcbea3b63a339dd94de0fc442fefacfe681cc7027ce63f67af9f7ceec7422"},
+ {file = "pydantic_core-2.3.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:bf3ed993bdf4754909f175ff348cf8f78d4451215b8aa338633f149ca3b1f37a"},
+ {file = "pydantic_core-2.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7584171eb3115acd4aba699bc836634783f5bd5aab131e88d8eeb8a3328a4a72"},
+ {file = "pydantic_core-2.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1624baa76d1740711b2048f302ae9a6d73d277c55a8c3e88b53b773ebf73a971"},
+ {file = "pydantic_core-2.3.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:06f33f695527f5a86e090f208978f9fd252c9cfc7e869d3b679bd71f7cb2c1fa"},
+ {file = "pydantic_core-2.3.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7ecf0a67b212900e92f328181fed02840d74ed39553cdb38d27314e2b9c89dfa"},
+ {file = "pydantic_core-2.3.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:45fa1e8ad6f4367ad73674ca560da8e827cc890eaf371f3ee063d6d7366a207b"},
+ {file = "pydantic_core-2.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8d0dbcc57839831ae79fd24b1b83d42bc9448d79feaf3ed3fb5cbf94ffbf3eb7"},
+ {file = "pydantic_core-2.3.0.tar.gz", hash = "sha256:5cfb5ac4e82c47d5dc25b209dd4c3989e284b80109f9e08b33c895080c424b4f"},
+]
+
+[[package]]
+name = "pygments"
+version = "2.16.1"
+requires_python = ">=3.7"
+summary = "Pygments is a syntax highlighting package written in Python."
+files = [
+ {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"},
+ {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"},
+]
+
+[[package]]
+name = "pygtrie"
+version = "2.5.0"
+summary = "A pure Python trie data structure implementation."
+files = [
+ {file = "pygtrie-2.5.0-py3-none-any.whl", hash = "sha256:8795cda8105493d5ae159a5bef313ff13156c5d4d72feddefacaad59f8c8ce16"},
+ {file = "pygtrie-2.5.0.tar.gz", hash = "sha256:203514ad826eb403dab1d2e2ddd034e0d1534bbe4dbe0213bb0593f66beba4e2"},
+]
+
+[[package]]
+name = "pyjwt"
+version = "2.8.0"
+requires_python = ">=3.7"
+summary = "JSON Web Token implementation in Python"
+files = [
+ {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"},
+ {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"},
+]
+
+[[package]]
+name = "pyjwt"
+version = "2.8.0"
+extras = ["crypto"]
+requires_python = ">=3.7"
+summary = "JSON Web Token implementation in Python"
+dependencies = [
+ "PyJWT==2.8.0",
+ "cryptography>=3.4.0",
+]
+files = [
+ {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"},
+ {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"},
+]
+
+[[package]]
+name = "pyparsing"
+version = "3.1.1"
+requires_python = ">=3.6.8"
+summary = "pyparsing module - Classes and methods to define and execute parsing grammars"
+files = [
+ {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"},
+ {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"},
+]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+summary = "Extensions to the standard Python datetime module"
+dependencies = [
+ "six>=1.5",
+]
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[[package]]
+name = "python-ulid"
+version = "1.1.0"
+requires_python = ">=3.7"
+summary = "Universally Unique Lexicographically Sortable Identifier"
+files = [
+ {file = "python-ulid-1.1.0.tar.gz", hash = "sha256:5fb5e4a91db8ca93e8938a613360b3def299b60d41f847279a8c39c9b2e9c65e"},
+ {file = "python_ulid-1.1.0-py3-none-any.whl", hash = "sha256:88c952f6be133dbede19c907d72d26717d2691ec8421512b573144794d891e24"},
+]
+
+[[package]]
+name = "pytz"
+version = "2023.3.post1"
+summary = "World timezone definitions, modern and historical"
+files = [
+ {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"},
+ {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"},
+]
+
+[[package]]
+name = "redis"
+version = "4.6.0"
+requires_python = ">=3.7"
+summary = "Python client for Redis database and key-value store"
+dependencies = [
+ "async-timeout>=4.0.2; python_full_version <= \"3.11.2\"",
+]
+files = [
+ {file = "redis-4.6.0-py3-none-any.whl", hash = "sha256:e2b03db868160ee4591de3cb90d40ebb50a90dd302138775937f6a42b7ed183c"},
+ {file = "redis-4.6.0.tar.gz", hash = "sha256:585dc516b9eb042a619ef0a39c3d7d55fe81bdb4df09a52c9cdde0d07bf1aa7d"},
+]
+
+[[package]]
+name = "redis-om"
+version = "0.2.1"
+requires_python = ">=3.7,<4.0"
+summary = "Object mappings, and more, for Redis."
+dependencies = [
+ "click<9.0.0,>=8.0.1",
+ "hiredis<3.0.0,>=2.2.3",
+ "more-itertools<10.0,>=8.14",
+ "pydantic<2.1.0,>=1.10.2",
+ "python-ulid<2.0.0,>=1.0.3",
+ "redis<5.0.0,>=3.5.3",
+ "types-redis<5.0.0,>=3.5.9",
+ "typing-extensions<5.0.0,>=4.4.0",
+]
+files = [
+ {file = "redis_om-0.2.1-py3-none-any.whl", hash = "sha256:31313a3027a014608b3a4d44ecd1d3000c7d0fe3a25060db19b42225e636cd53"},
+ {file = "redis_om-0.2.1.tar.gz", hash = "sha256:150c9cb5238d6003f35e9b6394aab30a0df35b00e955eb7dc508f4345e0a0ccc"},
+]
+
+[[package]]
+name = "regex"
+version = "2023.10.3"
+requires_python = ">=3.7"
+summary = "Alternative regular expression module, to replace re."
+files = [
+ {file = "regex-2023.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4c34d4f73ea738223a094d8e0ffd6d2c1a1b4c175da34d6b0de3d8d69bee6bcc"},
+ {file = "regex-2023.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8f4e49fc3ce020f65411432183e6775f24e02dff617281094ba6ab079ef0915"},
+ {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cd1bccf99d3ef1ab6ba835308ad85be040e6a11b0977ef7ea8c8005f01a3c29"},
+ {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81dce2ddc9f6e8f543d94b05d56e70d03a0774d32f6cca53e978dc01e4fc75b8"},
+ {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c6b4d23c04831e3ab61717a707a5d763b300213db49ca680edf8bf13ab5d91b"},
+ {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c15ad0aee158a15e17e0495e1e18741573d04eb6da06d8b84af726cfc1ed02ee"},
+ {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6239d4e2e0b52c8bd38c51b760cd870069f0bdf99700a62cd509d7a031749a55"},
+ {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4a8bf76e3182797c6b1afa5b822d1d5802ff30284abe4599e1247be4fd6b03be"},
+ {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9c727bbcf0065cbb20f39d2b4f932f8fa1631c3e01fcedc979bd4f51fe051c5"},
+ {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3ccf2716add72f80714b9a63899b67fa711b654be3fcdd34fa391d2d274ce767"},
+ {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:107ac60d1bfdc3edb53be75e2a52aff7481b92817cfdddd9b4519ccf0e54a6ff"},
+ {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:00ba3c9818e33f1fa974693fb55d24cdc8ebafcb2e4207680669d8f8d7cca79a"},
+ {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0a47efb1dbef13af9c9a54a94a0b814902e547b7f21acb29434504d18f36e3a"},
+ {file = "regex-2023.10.3-cp310-cp310-win32.whl", hash = "sha256:36362386b813fa6c9146da6149a001b7bd063dabc4d49522a1f7aa65b725c7ec"},
+ {file = "regex-2023.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:c65a3b5330b54103e7d21cac3f6bf3900d46f6d50138d73343d9e5b2900b2353"},
+ {file = "regex-2023.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90a79bce019c442604662d17bf69df99090e24cdc6ad95b18b6725c2988a490e"},
+ {file = "regex-2023.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c7964c2183c3e6cce3f497e3a9f49d182e969f2dc3aeeadfa18945ff7bdd7051"},
+ {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef80829117a8061f974b2fda8ec799717242353bff55f8a29411794d635d964"},
+ {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5addc9d0209a9afca5fc070f93b726bf7003bd63a427f65ef797a931782e7edc"},
+ {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c148bec483cc4b421562b4bcedb8e28a3b84fcc8f0aa4418e10898f3c2c0eb9b"},
+ {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d1f21af4c1539051049796a0f50aa342f9a27cde57318f2fc41ed50b0dbc4ac"},
+ {file = "regex-2023.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b9ac09853b2a3e0d0082104036579809679e7715671cfbf89d83c1cb2a30f58"},
+ {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ebedc192abbc7fd13c5ee800e83a6df252bec691eb2c4bedc9f8b2e2903f5e2a"},
+ {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d8a993c0a0ffd5f2d3bda23d0cd75e7086736f8f8268de8a82fbc4bd0ac6791e"},
+ {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:be6b7b8d42d3090b6c80793524fa66c57ad7ee3fe9722b258aec6d0672543fd0"},
+ {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4023e2efc35a30e66e938de5aef42b520c20e7eda7bb5fb12c35e5d09a4c43f6"},
+ {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0d47840dc05e0ba04fe2e26f15126de7c755496d5a8aae4a08bda4dd8d646c54"},
+ {file = "regex-2023.10.3-cp311-cp311-win32.whl", hash = "sha256:9145f092b5d1977ec8c0ab46e7b3381b2fd069957b9862a43bd383e5c01d18c2"},
+ {file = "regex-2023.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:b6104f9a46bd8743e4f738afef69b153c4b8b592d35ae46db07fc28ae3d5fb7c"},
+ {file = "regex-2023.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff507ae210371d4b1fe316d03433ac099f184d570a1a611e541923f78f05037"},
+ {file = "regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be5e22bbb67924dea15039c3282fa4cc6cdfbe0cbbd1c0515f9223186fc2ec5f"},
+ {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a992f702c9be9c72fa46f01ca6e18d131906a7180950958f766c2aa294d4b41"},
+ {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7434a61b158be563c1362d9071358f8ab91b8d928728cd2882af060481244c9e"},
+ {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2169b2dcabf4e608416f7f9468737583ce5f0a6e8677c4efbf795ce81109d7c"},
+ {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9e908ef5889cda4de038892b9accc36d33d72fb3e12c747e2799a0e806ec841"},
+ {file = "regex-2023.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12bd4bc2c632742c7ce20db48e0d99afdc05e03f0b4c1af90542e05b809a03d9"},
+ {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bc72c231f5449d86d6c7d9cc7cd819b6eb30134bb770b8cfdc0765e48ef9c420"},
+ {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bce8814b076f0ce5766dc87d5a056b0e9437b8e0cd351b9a6c4e1134a7dfbda9"},
+ {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ba7cd6dc4d585ea544c1412019921570ebd8a597fabf475acc4528210d7c4a6f"},
+ {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b0c7d2f698e83f15228ba41c135501cfe7d5740181d5903e250e47f617eb4292"},
+ {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5a8f91c64f390ecee09ff793319f30a0f32492e99f5dc1c72bc361f23ccd0a9a"},
+ {file = "regex-2023.10.3-cp312-cp312-win32.whl", hash = "sha256:ad08a69728ff3c79866d729b095872afe1e0557251da4abb2c5faff15a91d19a"},
+ {file = "regex-2023.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:39cdf8d141d6d44e8d5a12a8569d5a227f645c87df4f92179bd06e2e2705e76b"},
+ {file = "regex-2023.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b98b7681a9437262947f41c7fac567c7e1f6eddd94b0483596d320092004533"},
+ {file = "regex-2023.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:91dc1d531f80c862441d7b66c4505cd6ea9d312f01fb2f4654f40c6fdf5cc37a"},
+ {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82fcc1f1cc3ff1ab8a57ba619b149b907072e750815c5ba63e7aa2e1163384a4"},
+ {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7979b834ec7a33aafae34a90aad9f914c41fd6eaa8474e66953f3f6f7cbd4368"},
+ {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef71561f82a89af6cfcbee47f0fabfdb6e63788a9258e913955d89fdd96902ab"},
+ {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd829712de97753367153ed84f2de752b86cd1f7a88b55a3a775eb52eafe8a94"},
+ {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00e871d83a45eee2f8688d7e6849609c2ca2a04a6d48fba3dff4deef35d14f07"},
+ {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:706e7b739fdd17cb89e1fbf712d9dc21311fc2333f6d435eac2d4ee81985098c"},
+ {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cc3f1c053b73f20c7ad88b0d1d23be7e7b3901229ce89f5000a8399746a6e039"},
+ {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f85739e80d13644b981a88f529d79c5bdf646b460ba190bffcaf6d57b2a9863"},
+ {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:741ba2f511cc9626b7561a440f87d658aabb3d6b744a86a3c025f866b4d19e7f"},
+ {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e77c90ab5997e85901da85131fd36acd0ed2221368199b65f0d11bca44549711"},
+ {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:979c24cbefaf2420c4e377ecd1f165ea08cc3d1fbb44bdc51bccbbf7c66a2cb4"},
+ {file = "regex-2023.10.3-cp38-cp38-win32.whl", hash = "sha256:58837f9d221744d4c92d2cf7201c6acd19623b50c643b56992cbd2b745485d3d"},
+ {file = "regex-2023.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:c55853684fe08d4897c37dfc5faeff70607a5f1806c8be148f1695be4a63414b"},
+ {file = "regex-2023.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c54e23836650bdf2c18222c87f6f840d4943944146ca479858404fedeb9f9af"},
+ {file = "regex-2023.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69c0771ca5653c7d4b65203cbfc5e66db9375f1078689459fe196fe08b7b4930"},
+ {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ac965a998e1388e6ff2e9781f499ad1eaa41e962a40d11c7823c9952c77123e"},
+ {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c0e8fae5b27caa34177bdfa5a960c46ff2f78ee2d45c6db15ae3f64ecadde14"},
+ {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c56c3d47da04f921b73ff9415fbaa939f684d47293f071aa9cbb13c94afc17d"},
+ {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ef1e014eed78ab650bef9a6a9cbe50b052c0aebe553fb2881e0453717573f52"},
+ {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d29338556a59423d9ff7b6eb0cb89ead2b0875e08fe522f3e068b955c3e7b59b"},
+ {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9c6d0ced3c06d0f183b73d3c5920727268d2201aa0fe6d55c60d68c792ff3588"},
+ {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:994645a46c6a740ee8ce8df7911d4aee458d9b1bc5639bc968226763d07f00fa"},
+ {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:66e2fe786ef28da2b28e222c89502b2af984858091675044d93cb50e6f46d7af"},
+ {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:11175910f62b2b8c055f2b089e0fedd694fe2be3941b3e2633653bc51064c528"},
+ {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:06e9abc0e4c9ab4779c74ad99c3fc10d3967d03114449acc2c2762ad4472b8ca"},
+ {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fb02e4257376ae25c6dd95a5aec377f9b18c09be6ebdefa7ad209b9137b73d48"},
+ {file = "regex-2023.10.3-cp39-cp39-win32.whl", hash = "sha256:3b2c3502603fab52d7619b882c25a6850b766ebd1b18de3df23b2f939360e1bd"},
+ {file = "regex-2023.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:adbccd17dcaff65704c856bd29951c58a1bd4b2b0f8ad6b826dbd543fe740988"},
+ {file = "regex-2023.10.3.tar.gz", hash = "sha256:3fef4f844d2290ee0ba57addcec17eec9e3df73f10a2748485dfd6a3a188cc0f"},
+]
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+requires_python = ">=3.7"
+summary = "Python HTTP for Humans."
+dependencies = [
+ "certifi>=2017.4.17",
+ "charset-normalizer<4,>=2",
+ "idna<4,>=2.5",
+ "urllib3<3,>=1.21.1",
+]
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[[package]]
+name = "rich"
+version = "13.6.0"
+requires_python = ">=3.7.0"
+summary = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+dependencies = [
+ "markdown-it-py>=2.2.0",
+ "pygments<3.0.0,>=2.13.0",
+ "typing-extensions<5.0,>=4.0.0; python_version < \"3.9\"",
+]
+files = [
+ {file = "rich-13.6.0-py3-none-any.whl", hash = "sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245"},
+ {file = "rich-13.6.0.tar.gz", hash = "sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef"},
+]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+summary = "Python 2 and 3 compatibility utilities"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "tiktoken"
+version = "0.5.1"
+requires_python = ">=3.8"
+summary = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
+dependencies = [
+ "regex>=2022.1.18",
+ "requests>=2.26.0",
+]
+files = [
+ {file = "tiktoken-0.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2b0bae3fd56de1c0a5874fb6577667a3c75bf231a6cef599338820210c16e40a"},
+ {file = "tiktoken-0.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e529578d017045e2f0ed12d2e00e7e99f780f477234da4aae799ec4afca89f37"},
+ {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edd2ffbb789712d83fee19ab009949f998a35c51ad9f9beb39109357416344ff"},
+ {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c73d47bdc1a3f1f66ffa019af0386c48effdc6e8797e5e76875f6388ff72e9"},
+ {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:46b8554b9f351561b1989157c6bb54462056f3d44e43aa4e671367c5d62535fc"},
+ {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92ed3bbf71a175a6a4e5fbfcdb2c422bdd72d9b20407e00f435cf22a68b4ea9b"},
+ {file = "tiktoken-0.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:714efb2f4a082635d9f5afe0bf7e62989b72b65ac52f004eb7ac939f506c03a4"},
+ {file = "tiktoken-0.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a10488d1d1a5f9c9d2b2052fdb4cf807bba545818cb1ef724a7f5d44d9f7c3d4"},
+ {file = "tiktoken-0.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8079ac065572fe0e7c696dbd63e1fdc12ce4cdca9933935d038689d4732451df"},
+ {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ef730db4097f5b13df8d960f7fdda2744fe21d203ea2bb80c120bb58661b155"},
+ {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:426e7def5f3f23645dada816be119fa61e587dfb4755de250e136b47a045c365"},
+ {file = "tiktoken-0.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:323cec0031358bc09aa965c2c5c1f9f59baf76e5b17e62dcc06d1bb9bc3a3c7c"},
+ {file = "tiktoken-0.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5abd9436f02e2c8eda5cce2ff8015ce91f33e782a7423de2a1859f772928f714"},
+ {file = "tiktoken-0.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:1fe99953b63aabc0c9536fbc91c3c9000d78e4755edc28cc2e10825372046a2d"},
+ {file = "tiktoken-0.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dcdc630461927718b317e6f8be7707bd0fc768cee1fdc78ddaa1e93f4dc6b2b1"},
+ {file = "tiktoken-0.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1f2b3b253e22322b7f53a111e1f6d7ecfa199b4f08f3efdeb0480f4033b5cdc6"},
+ {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43ce0199f315776dec3ea7bf86f35df86d24b6fcde1babd3e53c38f17352442f"},
+ {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a84657c083d458593c0235926b5c993eec0b586a2508d6a2020556e5347c2f0d"},
+ {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c008375c0f3d97c36e81725308699116cd5804fdac0f9b7afc732056329d2790"},
+ {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:779c4dea5edd1d3178734d144d32231e0b814976bec1ec09636d1003ffe4725f"},
+ {file = "tiktoken-0.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:b5dcfcf9bfb798e86fbce76d40a1d5d9e3f92131aecfa3d1e5c9ea1a20f1ef1a"},
+ {file = "tiktoken-0.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b180a22db0bbcc447f691ffc3cf7a580e9e0587d87379e35e58b826ebf5bc7b"},
+ {file = "tiktoken-0.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b756a65d98b7cf760617a6b68762a23ab8b6ef79922be5afdb00f5e8a9f4e76"},
+ {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba9873c253ca1f670e662192a0afcb72b41e0ba3e730f16c665099e12f4dac2d"},
+ {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74c90d2be0b4c1a2b3f7dde95cd976757817d4df080d6af0ee8d461568c2e2ad"},
+ {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:709a5220891f2b56caad8327fab86281787704931ed484d9548f65598dea9ce4"},
+ {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d5a187ff9c786fae6aadd49f47f019ff19e99071dc5b0fe91bfecc94d37c686"},
+ {file = "tiktoken-0.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:e21840043dbe2e280e99ad41951c00eff8ee3b63daf57cd4c1508a3fd8583ea2"},
+ {file = "tiktoken-0.5.1.tar.gz", hash = "sha256:27e773564232004f4f810fd1f85236673ec3a56ed7f1206fc9ed8670ebedb97a"},
+]
+
+[[package]]
+name = "tqdm"
+version = "4.66.1"
+requires_python = ">=3.7"
+summary = "Fast, Extensible Progress Meter"
+dependencies = [
+ "colorama; platform_system == \"Windows\"",
+]
+files = [
+ {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"},
+ {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"},
+]
+
+[[package]]
+name = "types-pyopenssl"
+version = "23.2.0.2"
+summary = "Typing stubs for pyOpenSSL"
+dependencies = [
+ "cryptography>=35.0.0",
+]
+files = [
+ {file = "types-pyOpenSSL-23.2.0.2.tar.gz", hash = "sha256:6a010dac9ecd42b582d7dd2cc3e9e40486b79b3b64bb2fffba1474ff96af906d"},
+ {file = "types_pyOpenSSL-23.2.0.2-py3-none-any.whl", hash = "sha256:19536aa3debfbe25a918cf0d898e9f5fbbe6f3594a429da7914bf331deb1b342"},
+]
+
+[[package]]
+name = "types-redis"
+version = "4.6.0.7"
+summary = "Typing stubs for redis"
+dependencies = [
+ "cryptography>=35.0.0",
+ "types-pyOpenSSL",
+]
+files = [
+ {file = "types-redis-4.6.0.7.tar.gz", hash = "sha256:28c4153ddb5c9d4f10def44a2454673c361d2d5fc3cd867cf3bb1520f3f59a38"},
+ {file = "types_redis-4.6.0.7-py3-none-any.whl", hash = "sha256:05b1bf92879b25df20433fa1af07784a0d7928c616dc2ebf9087618db77ccbd0"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.8.0"
+requires_python = ">=3.8"
+summary = "Backported and Experimental Type Hints for Python 3.8+"
+files = [
+ {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
+ {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
+]
+
+[[package]]
+name = "tzdata"
+version = "2023.3"
+requires_python = ">=2"
+summary = "Provider of IANA time zone data"
+files = [
+ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
+ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+]
+
+[[package]]
+name = "urllib3"
+version = "2.0.7"
+requires_python = ">=3.7"
+summary = "HTTP library with thread-safe connection pooling, file post, and more."
+files = [
+ {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"},
+ {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"},
+]
+
+[[package]]
+name = "yarl"
+version = "1.9.2"
+requires_python = ">=3.7"
+summary = "Yet another URL library"
+dependencies = [
+ "idna>=2.0",
+ "multidict>=4.0",
+]
+files = [
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"},
+ {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"},
+ {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"},
+ {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"},
+ {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"},
+ {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"},
+ {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"},
+ {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"},
+ {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"},
+ {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"},
+]
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..06219952
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,34 @@
+[project]
+name = "adala"
+version = "0.0.1"
+description = "Adala: Autonomous Data Labeling Agent"
+authors = [
+ {name = "Human Signal", email = "hello@humansignal.com"},
+]
+dependencies = [
+ "pandas",
+ "openai",
+ "guidance",
+ "pydantic>=2",
+ "rich>=13",
+ "redis-om",
+]
+requires-python = ">=3.8.8"
+readme = "README.md"
+# [project.optional-dependencies]
+# label-studio = [
+# "label-studio-sdk @ git+https://github.com/HumanSignal/label-studio-sdk.git@pd-support",
+# ]
+# jupyter = [
+# "jupyter",
+# ]
+# docs = [
+# "sphinx>=7.1.2",
+# "sphinx-rtd-theme>=1.3.0",
+# "myst-parser>=2.0.0",
+# ]
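+# Example (a sketch, assuming the optional-dependencies block above is
+# re-enabled with these extra names): pip install -e ".[label-studio,docs]"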
+
+[build-system]
+requires = ["pdm-backend"]
+build-backend = "pdm.backend"
+
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..620d7ee1
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+pandas
+openai
+guidance
+label-studio-sdk @ git+https://github.com/HumanSignal/label-studio-sdk.git@pd-support
+rich~=13.6
+pydantic
+redis-om
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..0cd185b4
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,28 @@
+"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
+"""
+import setuptools
+
+# Module dependencies, read from requirements.txt (blank lines and comments skipped)
+requirements = []
+with open('requirements.txt') as f:
+    for line in f.read().splitlines():
+        if line.strip() and not line.strip().startswith('#'):
+            requirements.append(line)
+
+setuptools.setup(
+ name='adala',
+ version='0.0.1',
+ author='Heartex',
+ author_email="hello@humansignal.com",
+    description='ADALA: Autonomous Data Labeling Agent',
+ url='https://github.com/HumanSignal/ADALA',
+ packages=setuptools.find_packages(),
+ include_package_data=True,
+ classifiers=[
+ 'Programming Language :: Python :: 3',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ ],
+ python_requires='>=3.8',
+ install_requires=requirements
+)
diff --git a/tests/requirements-test.txt b/tests/requirements-test.txt
new file mode 100644
index 00000000..c63eb541
--- /dev/null
+++ b/tests/requirements-test.txt
@@ -0,0 +1,5 @@
+pytest==6.2.5
+pytest-cov==3.0.0
+pytest-env==0.6.2
+# fakeredis==1.5.0
+# pytest-xdist
\ No newline at end of file
diff --git a/tests/test_classification.py b/tests/test_classification.py
new file mode 100644
index 00000000..92a2b1e1
--- /dev/null
+++ b/tests/test_classification.py
@@ -0,0 +1,89 @@
+import pandas as pd
+from unittest.mock import patch
+from adala.runtimes.openai import OpenAIRuntime
+
+from adala.agents import Agent
+from adala.datasets import DataFrameDataset
+from adala.environments import BasicEnvironment
+from adala.skills import ClassificationSkill
+from adala.utils.logs import print_dataframe
+
+
+def process_record_generator(*args, **kwargs):
+ # train
+ for i in range(3):
+ # predictions for gt comparison
+ yield {'sentiment': 'Neutral' if i < 2 else 'Positive'}
+ yield {'sentiment': 'Neutral' if i < 2 else 'Negative'}
+ yield {'sentiment': 'Neutral'}
+
+ # errors
+ yield {'reason': 'Test reason'}
+ yield {'reason': 'Test reason'}
+ yield {'reason': 'Test reason'}
+ yield {'reason': 'Test reason'}
+
+ # instruction generation
+ yield {'new_instruction': 'Test instruction'}
+
+ # test
+ yield {'sentiment': 'Positive'}
+ yield {'sentiment': 'Negative'}
+ yield {'sentiment': 'Neutral'}
+
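+# The mocked OpenAIRuntime._process_record returns the yields above one call at
+# a time (side_effect with a generator): ground-truth comparison predictions for
+# the three learning iterations, then error-analysis reasons, then the refined
+# instruction, then predictions for the test dataset.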
+
+# Stacked @patch decorators are applied bottom-up, so the mock for the lowest
+# decorator (_process_record) is passed to the test function first.
+@patch.object(OpenAIRuntime, '_check_api_key', return_value=None)
+@patch.object(OpenAIRuntime, '_check_model_availability', return_value=None)
+@patch.object(OpenAIRuntime, '_process_record', side_effect=process_record_generator())
+def test_classification_skill(
+    mock_process_record,
+    mock_check_model_availability,
+    mock_check_api_key
+):
+ print("=> Initialize datasets ...")
+
+ # Train dataset
+ train_df = pd.DataFrame([
+ ["It was the negative first impressions, and then it started working.", "Positive"],
+ ["Not loud enough and doesn't turn on like it should.", "Negative"],
+ ["I don't know what to say.", "Neutral"],
+ ], columns=["text", "ground_truth"])
+
+ # Test dataset
+ test_df = pd.DataFrame([
+ "All three broke within two months of use.",
+ "The device worked for a long time, can't say anything bad.",
+ "Just a random line of text.",
+ ], columns=["text"])
+
+ train_dataset = DataFrameDataset(df=train_df)
+ test_dataset = DataFrameDataset(df=test_df)
+
+ print("=> Initialize and train ADALA agent ...")
+ agent = Agent(
+ # connect to a dataset
+ environment=BasicEnvironment(
+ ground_truth_dataset=train_dataset,
+ ground_truth_column="ground_truth"
+ ),
+ # define a skill
+ skills=ClassificationSkill(
+ name='sentiment',
+ instructions="Label text as subjective or objective.",
+ labels=["Positive", "Negative", "Neutral"],
+ input_data_field='text'
+ ),
+ )
+ run = agent.learn(learning_iterations=3, accuracy_threshold=0.95)
+ assert run.accuracy > 0.8
+
+ print('\n\n=> Final instructions:')
+ print('=====================')
+ print(f'{run.updated_instructions}')
+ print('=====================')
+
+ print('\n=> Run test ...')
+ run = agent.apply_skills(test_dataset)
+ print_dataframe(run.predictions)
+
+ assert not run.predictions.empty
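+
+# To run this test on its own (assumes pytest from tests/requirements-test.txt
+# is installed along with the package): python -m pytest tests/test_classification.py -vv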