From 4cccb1ac97568ad7121b9b2b9750930d3d66cb96 Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Mon, 5 Aug 2024 11:01:44 +0200 Subject: [PATCH 01/19] upload main files --- CODE_OF_CONDUCT.md | 133 +++++++ CONTRIBUTING.md | 202 ++++++++++ Dockerfile | 36 ++ Dockerfile.dev | 35 ++ Dockerfile.test | 34 ++ LICENSE.txt | 373 ++++++++++++++++++ Makefile | 18 + README.md | 269 +++++++++++++ SECURITY.md | 13 + __pycache__/main.cpython-310.pyc | Bin 0 -> 3830 bytes __pycache__/main_alex.cpython-310.pyc | Bin 0 -> 3048 bytes charts/aina-tts-api/Chart.yaml | 16 + charts/aina-tts-api/templates/deployment.yaml | 35 ++ charts/aina-tts-api/templates/service.yaml | 15 + charts/aina-tts-api/values.yaml | 6 + docker-compose-dev.yml | 18 + docker-compose-gpu.yml | 21 + docker-compose-test.yml | 14 + docker-compose.yml | 13 + infer_wavenext_onnx.py | 170 ++++++++ main.py | 155 ++++++++ main_alex.py | 111 ++++++ requirements.txt | 9 + .../inference_onnx.cpython-310.pyc | Bin 0 -> 6077 bytes scripts/change_model.py | 14 + scripts/inference_onnx.py | 270 +++++++++++++ scripts/speakers_conversion.csv | 8 + server/__init__.py | 34 ++ server/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1253 bytes .../exception_handler.cpython-310.pyc | Bin 0 -> 1043 bytes server/__pycache__/exceptions.cpython-310.pyc | Bin 0 -> 725 bytes server/audio_utils/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 160 bytes .../__pycache__/audio_utils.cpython-310.pyc | Bin 0 -> 2507 bytes server/audio_utils/audio_utils.py | 106 +++++ server/audio_utils/audio_utils_orig.py | 95 +++++ server/exception_handler.py | 23 ++ server/exceptions.py | 8 + server/helper/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 155 bytes .../helper/__pycache__/config.cpython-310.pyc | Bin 0 -> 1605 bytes .../__pycache__/singleton.cpython-310.pyc | Bin 0 -> 663 bytes server/helper/config.py | 122 ++++++ server/helper/singleton.py | 16 + server/modules/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 156 bytes .../tts_request_model.cpython-310.pyc | Bin 0 -> 752 bytes server/modules/tts_request_model.py | 22 ++ server/static/aina_greyscale.png | Bin 0 -> 53580 bytes server/templates/details.html | 131 ++++++ server/templates/index.html | 152 +++++++ server/templates/websocket_demo.html | 147 +++++++ server/tests/__init__.py | 0 server/tests/api/__init__.py | 0 server/tests/api/views/__init__.py | 0 server/tests/api/views/test_api.py | 81 ++++ server/tests/base_test_case.py | 27 ++ server/tests/test_config.py | 14 + server/tests/test_worker.py | 22 ++ server/utils/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 154 bytes .../__pycache__/argparse.cpython-310.pyc | Bin 0 -> 769 bytes .../utils/__pycache__/utils.cpython-310.pyc | Bin 0 -> 1123 bytes server/utils/argparse.py | 13 + server/utils/utils.py | 32 ++ server/views/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 154 bytes server/views/api/__init__.py | 0 .../api/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 158 bytes .../views/api/__pycache__/api.cpython-310.pyc | Bin 0 -> 6186 bytes server/views/api/api.py | 246 ++++++++++++ server/views/health/__init__.py | 32 ++ .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1379 bytes server/workers/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 156 bytes .../__pycache__/workers.cpython-310.pyc | Bin 0 -> 2329 bytes server/workers/workers.py | 142 +++++++ text/__init__.py | 53 +++ text/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 2089 bytes 
text/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 2126 bytes text/__pycache__/cleaners.cpython-310.pyc | Bin 0 -> 4145 bytes text/__pycache__/cleaners.cpython-39.pyc | Bin 0 -> 4463 bytes text/__pycache__/symbols.cpython-310.pyc | Bin 0 -> 728 bytes text/__pycache__/symbols.cpython-39.pyc | Bin 0 -> 754 bytes text/cleaners.py | 168 ++++++++ text/numbers.py | 71 ++++ text/symbols.py | 17 + 87 files changed, 3762 insertions(+) create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 Dockerfile create mode 100644 Dockerfile.dev create mode 100644 Dockerfile.test create mode 100644 LICENSE.txt create mode 100644 Makefile create mode 100644 README.md create mode 100644 SECURITY.md create mode 100644 __pycache__/main.cpython-310.pyc create mode 100644 __pycache__/main_alex.cpython-310.pyc create mode 100644 charts/aina-tts-api/Chart.yaml create mode 100644 charts/aina-tts-api/templates/deployment.yaml create mode 100644 charts/aina-tts-api/templates/service.yaml create mode 100644 charts/aina-tts-api/values.yaml create mode 100644 docker-compose-dev.yml create mode 100644 docker-compose-gpu.yml create mode 100644 docker-compose-test.yml create mode 100644 docker-compose.yml create mode 100644 infer_wavenext_onnx.py create mode 100644 main.py create mode 100644 main_alex.py create mode 100644 requirements.txt create mode 100644 scripts/__pycache__/inference_onnx.cpython-310.pyc create mode 100644 scripts/change_model.py create mode 100644 scripts/inference_onnx.py create mode 100644 scripts/speakers_conversion.csv create mode 100644 server/__init__.py create mode 100644 server/__pycache__/__init__.cpython-310.pyc create mode 100644 server/__pycache__/exception_handler.cpython-310.pyc create mode 100644 server/__pycache__/exceptions.cpython-310.pyc create mode 100644 server/audio_utils/__init__.py create mode 100644 server/audio_utils/__pycache__/__init__.cpython-310.pyc create mode 100644 server/audio_utils/__pycache__/audio_utils.cpython-310.pyc create mode 100644 server/audio_utils/audio_utils.py create mode 100644 server/audio_utils/audio_utils_orig.py create mode 100644 server/exception_handler.py create mode 100644 server/exceptions.py create mode 100644 server/helper/__init__.py create mode 100644 server/helper/__pycache__/__init__.cpython-310.pyc create mode 100644 server/helper/__pycache__/config.cpython-310.pyc create mode 100644 server/helper/__pycache__/singleton.cpython-310.pyc create mode 100644 server/helper/config.py create mode 100644 server/helper/singleton.py create mode 100644 server/modules/__init__.py create mode 100644 server/modules/__pycache__/__init__.cpython-310.pyc create mode 100644 server/modules/__pycache__/tts_request_model.cpython-310.pyc create mode 100644 server/modules/tts_request_model.py create mode 100644 server/static/aina_greyscale.png create mode 100644 server/templates/details.html create mode 100644 server/templates/index.html create mode 100644 server/templates/websocket_demo.html create mode 100644 server/tests/__init__.py create mode 100644 server/tests/api/__init__.py create mode 100644 server/tests/api/views/__init__.py create mode 100644 server/tests/api/views/test_api.py create mode 100644 server/tests/base_test_case.py create mode 100644 server/tests/test_config.py create mode 100644 server/tests/test_worker.py create mode 100644 server/utils/__init__.py create mode 100644 server/utils/__pycache__/__init__.cpython-310.pyc create mode 100644 server/utils/__pycache__/argparse.cpython-310.pyc create mode 100644 
server/utils/__pycache__/utils.cpython-310.pyc create mode 100644 server/utils/argparse.py create mode 100644 server/utils/utils.py create mode 100644 server/views/__init__.py create mode 100644 server/views/__pycache__/__init__.cpython-310.pyc create mode 100644 server/views/api/__init__.py create mode 100644 server/views/api/__pycache__/__init__.cpython-310.pyc create mode 100644 server/views/api/__pycache__/api.cpython-310.pyc create mode 100644 server/views/api/api.py create mode 100644 server/views/health/__init__.py create mode 100644 server/views/health/__pycache__/__init__.cpython-310.pyc create mode 100644 server/workers/__init__.py create mode 100644 server/workers/__pycache__/__init__.cpython-310.pyc create mode 100644 server/workers/__pycache__/workers.cpython-310.pyc create mode 100644 server/workers/workers.py create mode 100644 text/__init__.py create mode 100644 text/__pycache__/__init__.cpython-310.pyc create mode 100644 text/__pycache__/__init__.cpython-39.pyc create mode 100644 text/__pycache__/cleaners.cpython-310.pyc create mode 100644 text/__pycache__/cleaners.cpython-39.pyc create mode 100644 text/__pycache__/symbols.cpython-310.pyc create mode 100644 text/__pycache__/symbols.cpython-39.pyc create mode 100644 text/cleaners.py create mode 100644 text/numbers.py create mode 100644 text/symbols.py diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..f0c1209 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +langtech@bsc.es. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..b607d85 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,202 @@ + +# Contribute to TTS-API + +Everyone is welcome to contribute, and we value everybody's contribution. Code +contributions are not the only way to help the community. Answering questions, helping +others, and improving the documentation are also immensely valuable. + +However you choose to contribute, please be mindful and respect our +[code of conduct](CODE_OF_CONDUCT.md). + +**This guide was heavily inspired by the awesome [transformers guide to contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).** + +## Ways to contribute + +There are several ways you can contribute: + +* Fix outstanding issues with the existing code. +* Submit issues related to bugs or desired new features. +* Contribute to the examples or to the documentation. + +If you don't know where to start, there is a special [Good First +Issue](https://github.com/projecte-aina/tts-api/contribute) listing. It will give you a list of +open issues that are beginner-friendly and help you start contributing to open-source. The best way to do that is to open a Pull Request and link it to the issue that you'd like to work on. We try to give priority to opened PRs as we can easily track the progress of the fix, and if the contributor does not have time anymore, someone else can take the PR over. + +For something slightly more challenging, you can also take a look at the [Good Second Issue](/labels/Good%20Second%20Issue) list. In general though, if you feel like you know what you're doing, go for it and we'll help you get there! 🚀 + +> All contributions are equally valuable to the community. 🥰 + +## Fixing outstanding issues + +If you notice an issue with the existing code and have a fix in mind, feel free to [start contributing](#create-a-pull-request) and open a Pull Request! + +## Submitting a bug-related issue or feature request + +Do your best to follow these guidelines when submitting a bug-related issue or a feature +request. It will make it easier for us to come back to you quickly and with good +feedback. + +### Did you find a bug? + +Before you report an issue, we would really appreciate it if you could **make sure the bug was not +already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the project itself, and not your code. If you're unsure whether the bug is in your code or the project, please ask in the [Discussions](https://github.com/projecte-aina/tts-api/discussions) first. This helps us respond quicker to fixing issues related to the project versus general questions. + +Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it: + +* Your **OS type and version** and **Python**, **PyTorch** and versions when applicable. +* A short, self-contained, code snippet that allows us to reproduce the bug in + less than 30s. +* The *full* traceback if an exception is raised. +* Attach any other additional information, like screenshots, you think may help. + + +### Do you want a new feature? 
+ +If there is a new feature you'd like to see in this project, please open an issue and describe: + +1. What is the *motivation* behind this feature? Is it related to a problem or frustration with the project? Is it a feature related to something you need for a project? Is it something you worked on and think it could benefit the community? + + Whatever it is, we'd love to hear about it! + +2. Describe your requested feature in as much detail as possible. The more you can tell us about it, the better we'll be able to help you. +3. Provide a *code snippet* that demonstrates the features usage. +4. If the feature is related to a paper, please include a link. + +If your issue is well written we're already 80% of the way there by the time you create it. + + +## Do you want to add documentation? + +We're always looking for improvements to the documentation that make it more clear and accurate. Please let us know how the documentation can be improved such as typos and any content that is missing, unclear or inaccurate. We'll be happy to make the changes or help you make a contribution if you're interested! + + +## Create a Pull Request + +Before writing any code, we strongly advise you to search through the existing PRs or +issues to make sure nobody is already working on the same thing. If you are +unsure, it is always a good idea to open an issue to get some feedback. + +You will need basic `git` proficiency to contribute to. +While `git` is not the easiest tool to use, it has the greatest +manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro +Git](https://git-scm.com/book/en/v2) is a very good reference. + +You'll need **Python 3.10** or above to contribute to. Follow the steps below to start contributing: + +1. Fork the [repository](https://github.com/projecte-aina/tts-api/) by + clicking on the **[Fork](https://github.com/projecte-aina/tts-api/fork)** button on the repository's page. This creates a copy of the code + under your GitHub user account. + +2. Clone your fork to your local disk, and add the base repository as a remote: + + ```bash + git clone git@github.com:/tts-api.git + cd tts-api + git remote add upstream https://github.com/projecte-aina/tts-api.git + ``` + +3. Create a new branch to hold your development changes: + + ```bash + git checkout -b a-descriptive-name-for-my-changes + ``` + + 🚨 **Do not** work on the `main` branch! + +4. To set up a development environment you have the following options: + + - Manually install the dependences and setup the environment, follow the steps at [README](https://github.com/projecte-aina/tts-api?tab=readme-ov-file#installation). + + - Develop in docker with autoreload, follow the steps available at the [DEVELOPER MODE](https://github.com/projecte-aina/tts-api?tab=readme-ov-file#develop-in-docker) section + + +5. Develop the features in your branch. + + As you work on your code, you should make sure the test suite + passes. Run the tests impacted by your changes like this: + + ```bash + pytest + ``` + + Once you're happy with your changes, add the changed files with `git add` and + record your changes locally with `git commit`: + + ```bash + git add modified_file.py + git commit + ``` + + Please remember to write [good commit + messages](https://chris.beams.io/posts/git-commit/) to clearly communicate the changes you made! 
+ + To keep your copy of the code up to date with the original + repository, rebase your branch on `upstream/branch` *before* you open a pull request or if requested by a maintainer: + + ```bash + git fetch upstream + git rebase upstream/main + ``` + + Push your changes to your branch: + + ```bash + git push -u origin a-descriptive-name-for-my-changes + ``` + + If you've already opened a pull request, you'll need to force push with the `--force` flag. Otherwise, if the pull request hasn't been opened yet, you can just push your changes normally. + +6. Now you can go to your fork of the repository on GitHub and click on **Pull Request** to open a pull request. Make sure you tick off all the boxes on our [checklist](#pull-request-checklist) below. When you're ready, you can send your changes to the project maintainers for review. + +7. It's ok if maintainers request changes, it happens to our core contributors + too! So everyone can see the changes in the pull request, work in your local + branch and push the changes to your fork. They will automatically appear in + the pull request. + +### Pull request checklist + +☐ The pull request title should summarize your contribution.
+☐ If your pull request addresses an issue, please mention the issue number in the pull +request description to make sure they are linked (and people viewing the issue know you +are working on it).
+☐ To indicate a work in progress please prefix the title with `[WIP]`. These are +useful to avoid duplicated work, and to differentiate it from PRs ready to be merged.
+☐ Make sure existing tests pass.
+☐ If adding a new feature, also add tests for it.
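As a reference for the last two items, here is a minimal sketch of what an API test can look like. It uses FastAPI's `TestClient`; the import path and the request payload are illustrative assumptions, so adapt them to the real app entry point and to the tests already under `server/tests/`.

```python
# Minimal sketch of an API test, in the spirit of server/tests/api/views/test_api.py.
# The import path and payload below are assumptions; adjust them to your checkout.
from fastapi.testclient import TestClient

from main import app  # hypothetical: wherever the FastAPI instance is exposed

client = TestClient(app)


def test_tts_returns_wav():
    payload = {"voice": "f_cen_81", "type": "text", "text": "Bon dia!", "language": "ca-es"}
    response = client.post("/api/tts", json=payload)
    assert response.status_code == 200
    # The endpoint streams WAV audio; checking the RIFF header is a cheap sanity check.
    assert response.content[:4] == b"RIFF"
```

Run it with `pytest` from the repository root, or use `make tests` to run the whole suite in Docker.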
+ + +### Style guide + +For documentation strings, this project follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html). + + +### Develop on Windows + +On Windows (unless you're working in [Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/) or WSL), you need to configure git to transform Windows `CRLF` line endings to Linux `LF` line endings: + +```bash +git config core.autocrlf input +``` + +One way to run the `make` command on Windows is with MSYS2: + +1. [Download MSYS2](https://www.msys2.org/), and we assume it's installed in `C:\msys64`. +2. Open the command line `C:\msys64\msys2.exe` (it should be available from the **Start** menu). +3. Run in the shell: `pacman -Syu` and install `make` with `pacman -S make`. +4. Add `C:\msys64\usr\bin` to your PATH environment variable. + +You can now use `make` from any terminal (PowerShell, cmd.exe, etc.)! 🎉 + +### Sync a forked repository with upstream main + +When updating the main branch of a forked repository, please follow these steps to avoid pinging the upstream repository which adds reference notes to each upstream PR, and sends unnecessary notifications to the developers involved in these PRs. + +1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main. +2. If a PR is absolutely necessary, use the following steps after checking out your branch: + + ```bash + git checkout -b your-branch-for-syncing + git pull --squash --no-commit upstream main + git commit -m '' + git push --set-upstream origin your-branch-for-syncing + ``` diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..aa271f9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,36 @@ +FROM python:3.10.12-slim +# RUN apt-get update && apt-get install -y --no-install-recommends wget gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/* + +# Install required packages for building eSpeak and general utilities +RUN apt-get update && apt-get install -y \ + build-essential \ + autoconf \ + automake \ + libtool \ + pkg-config \ + git \ + wget \ + cmake \ + && rm -rf /var/lib/apt/lists/* + +RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng + +RUN pip install --upgrade pip && \ + cd espeak-ng && \ + ./autogen.sh && \ + ./configure --prefix=/usr && \ + make && \ + make install + +RUN pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c + +WORKDIR /app +COPY ./requirements.txt /app +RUN python -m pip install --upgrade pip +RUN python -m pip install --no-cache-dir -r requirements.txt + +RUN wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P /app/models/vits_ca/ +RUN mv /app/models/vits_ca/best_model_8khz.pth /app/models/vits_ca/best_model.pth +COPY . . 
+ +ENTRYPOINT python main.py --speech_speed ${SPEECH_SPEED} --mp_workers ${MP_WORKERS} --use_cuda ${USE_CUDA} --use_mp ${USE_MP} diff --git a/Dockerfile.dev b/Dockerfile.dev new file mode 100644 index 0000000..2b5a7cd --- /dev/null +++ b/Dockerfile.dev @@ -0,0 +1,35 @@ +FROM python:3.10.12-slim +# RUN apt-get update && apt-get install -y --no-install-recommends wget gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/* + +# Install required packages for building eSpeak and general utilities +RUN apt-get update && apt-get install -y \ + build-essential \ + autoconf \ + automake \ + libtool \ + pkg-config \ + git \ + wget \ + cmake \ + && rm -rf /var/lib/apt/lists/* + +RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng + +RUN pip install --upgrade pip && \ + cd espeak-ng && \ + ./autogen.sh && \ + ./configure --prefix=/usr && \ + make && \ + make install + +RUN pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c + +WORKDIR /app +# RUN wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P /app/models/vits_ca/ +# RUN mv /app/models/vits_ca/best_model_8khz.pth /app/models/vits_ca/best_model.pth + +COPY ./requirements.txt /app +RUN python -m pip install --upgrade pip +RUN python -m pip install --no-cache-dir -r requirements.txt + +ENTRYPOINT python main.py --speech_speed ${SPEECH_SPEED} --mp_workers ${MP_WORKERS} --use_cuda ${USE_CUDA} --use_mp ${USE_MP} --show_details True --reload \ No newline at end of file diff --git a/Dockerfile.test b/Dockerfile.test new file mode 100644 index 0000000..1f840aa --- /dev/null +++ b/Dockerfile.test @@ -0,0 +1,34 @@ +FROM python:3.10.12-slim +# RUN apt-get update && apt-get install -y --no-install-recommends wget gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/* + +# Install required packages for building eSpeak and general utilities +RUN apt-get update && apt-get install -y \ + build-essential \ + autoconf \ + automake \ + libtool \ + pkg-config \ + git \ + wget \ + cmake \ + && rm -rf /var/lib/apt/lists/* + +RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng + +RUN pip install --upgrade pip && \ + cd espeak-ng && \ + ./autogen.sh && \ + ./configure --prefix=/usr && \ + make && \ + make install + +RUN pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c + +WORKDIR /app + +COPY ./requirements.txt /app +RUN python -m pip install --upgrade pip +RUN python -m pip install --no-cache-dir -r requirements.txt +RUN pip install pytest httpx pydub pytest-repeat + +ENTRYPOINT pytest diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..14e2f77 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. 
Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..2315c6a --- /dev/null +++ b/Makefile @@ -0,0 +1,18 @@ +deploy: + docker compose --env-file .env up -d --build +deploy-d: + docker compose --env-file .env up --build +deploy-gpu: + docker compose -f docker-compose-gpu.yml --env-file .env up -d --build +dev: + docker compose -f docker-compose-dev.yml up --build +tests: + docker compose -f docker-compose-test.yml up --build +undeploy: + docker compose down +stop: + docker compose stop + + +act-run-tests: + gh act -j test -W '.github/workflows/tests.yml' \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..ee137c1 --- /dev/null +++ b/README.md @@ -0,0 +1,269 @@ +# TTS API + +RestFUL api and web interface to serve coqui TTS models + +## Installation + +The requirements are tested for python 3.10. In order for coqui TTS to work, some dependencies should be installed. + +1. Update your system's package list and install the required packages for building eSpeak and general utilities: +```bash +sudo apt update && sudo apt install -y \ + build-essential \ + autoconf \ + automake \ + libtool \ + pkg-config \ + git \ + wget \ + cmake +``` +2. 
Clone the eSpeak-ng repository and build it: +```bash +git clone -b dev-ca https://github.com/projecte-aina/espeak-ng +cd espeak-ng && \ + sudo ./autogen.sh && \ + sudo ./configure --prefix=/usr && \ + sudo make && \ + sudo make install +``` + +Then simply upgrade pip: + +``` +python -m pip install --upgrade pip +``` + +In order to synthesize, the actual model needs to be downloaded and the paths in the config file need to be changed (replacing `/opt` with the top directory of the repository). The model can be downloaded from [http://share.laklak.eu/model_vits_ca/best_model.pth](http://share.laklak.eu/model_vits_ca/best_model.pth) into the models directory. + +## Launch + +tts-api uses `FastAPI` and `uvicorn` under the hood. For now, in order to launch: + +``` +python server/server.py --model_path models/vits_ca/best_model.pth --config_path models/vits_ca/config.json --port 8001 +``` +which receives calls on `0.0.0.0:8001`, or simply +``` +python server/server.py +``` +which listens on `0.0.0.0:8000` by default + +## Usage + +tts-api has three inference endpoints, two OpenAPI ones (as can be seen via `/docs`) and one websocket endpoint: + +* `/api/tts`: main inference endpoint +* `/audio-stream`: websocket endpoint; capable of async inference, the audio starts streaming as soon as the first segment is synthesized. + +The example for `/api/tts` can be found in `/docs`. The websocket request is contingent on the communication with the client, hence we provide an example client at the `/websocket-demo` endpoint. For `/api/tts`, the call looks like the following: + +``` +curl --location --request POST 'http://localhost:8080/api/tts' --header 'Content-Type: application/json' --data-raw '{ + "voice": "f_cen_81", + "type": "text", + "text": "El Consell s’ha reunit avui per darrera vegada abans de les eleccions. Divendres vinent, tant el president com els consellers ja estaran en funcions. A l’ordre del dia d’avui tampoc no hi havia l’aprovació del requisit lingüístic, és a dir la normativa que ha de regular la capacitació lingüística dels aspirants a accedir a un lloc en la Funció Pública Valenciana.", + "language": "ca-es" }' --output tts.wav +``` + +## Docker launch from the hub + + +To launch using the latest version available on Docker Hub: + + +``` +docker run --shm-size=1gb -p 8080:8000 projecteaina/tts-api:latest +``` + +[Check out the documentation available on Docker Hub](https://hub.docker.com/r/projecteaina/tts-api) + +## Docker build and launch + +To build: +``` +docker build -t tts-api . +``` + +To launch: +``` +docker run --shm-size=1gb -p 8080:8000 tts-api +``` +The default entrypoint serves the web interface at `http://0.0.0.0:8080/`. + + +## Develop in docker +You can run this API with Docker in reload mode, which lets you watch your local changes to the API. + +To run in dev mode, run the following command. + +```bash +make dev +``` + +> [!NOTE] +> The model **best_model.pth** is required; you have to download it yourself. +```bash +wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P models/vits_ca/ +``` +```bash +mv models/vits_ca/best_model_8khz.pth models/vits_ca/best_model.pth +``` + +## REST API Endpoints + +| **Method** | **Endpoint** | **Description** | +|------------|--------------|-------------------------------------------------------| +| POST | `/api/tts` | Generate speech audio from text using TTS. 
| + +**Request Parameters:** + +| **Parameter** | **Type** | **Description** | +|---------------|--------------------|------------------------------------------------------------| +| language | string | ISO language code (e.g., "ca-es") | +| voice | string | Name of the voice to use | +| type | string | Type of input text ("text" or "ssml") | +| text | string | Text to be synthesized (if type is "ssml", enclose in tags) | + + +**NOTES:** +- ssml format is not available yet. +- Currently, only "ca-es" language is supported, and will be applied by default + +**Successful Response:** + +The endpoint returns a streaming response that contains the synthesized speech audio in WAV format. + + +**Sample Request:** + +```http +POST /api/tts + +{ + "voice": "speaker_id", + "text": "Bon dia!", + "type": "text" +} +``` + + +#### Command line deployment arguments +| **Argument** | **Type** | **Default** | **Description** | +|------------------------|----------|-----------------------------------------|-------------------------------------------------------------------------------| +| mp_workers | int | 2 | Number of CPUs used for multiprocessing. | +| speech_speed | float | 1.0 | Change the speech speed. | + +- mp_workers: the "mp_workers" argument specifies the number of separate processes used for inference. For example, if mp_workers is set to 2 and the input consists of 2 sentences, there will be a process assigned to each sentence, speeding up inference. + +- The "speech_speed" argument refers to a parameter that adjusts the rate at which speech sounds in an audio output, with higher values resulting in faster speech, and lower values leading to slower speech. + + +## Deployment + + +### Environment Variables + +To deploy this project, you will need to add the following environment variables to your .env file + +`SPEECH_SPEED` + +`MP_WORKERS` + +`USE_CUDA` + +`USE_MP` + +`SHM_SIZE` + + +Example of .env file +```bash +SPEECH_SPEED=1.0 +MP_WORKERS=4 +USE_CUDA=False +USE_MP=True +SHM_SIZE=2gb +``` + + +### Deployment via docker compose + +#### Prerequisites + +- Make + +- [Docker](https://docs.docker.com/engine/install/ubuntu/) + +- [Docker compose](https://docs.docker.com/compose/install/) + +To deploy this app +```bash +make deploy +``` + +To deploy this app using GPU +```bash +make deploy-gpu +``` +To stop deployment run +```bash +make stop +``` +To delete deployment run +```bash +make undeploy +``` + +#### Deployment via Helm + +The chart is still not available on any repository so you need to run this command from the repository folder. +Please, keep in mind that if you are deploying this chart to a cloud K8s instance you need to push the Docker image first +to an image registry. + +Create namespace +```bash +kubectl create namespace apps +``` +Deploy chart +```bash +#Run the following command from $BASE_REPO_PATH/charts/aina-tts-api path +helm upgrade --install aina-tts-api --create-namespace . +``` + +You can either change the values on `values.yaml` or override them. + +```bash +helm upgrade --install aina-tts-api --create-namespace \ + --set global.namespace=apps \ + --set api.image=tts-api \ + --set api.tag=latest . +``` + +Deploy helm chart with a different speech speed value +```bash +helm upgrade --install aina-tts-api --create-namespace \ + --set api.speech_speed=1.6 . +``` + +## Authors and acknowledgment +Developed by the Language Technologies Unit in Barcelona Supercomputing Center. The code is based on Coqui TTS server.py that has a Mozilla Public License 2.0. 
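Once the service is deployed (via docker compose or the Helm chart above), a quick way to verify it end to end from Python is shown below. This is a sketch that assumes the default port mapping `8080:8000` used in the compose files, with the voice and payload fields taken from the request examples earlier in this README.

```python
# Minimal Python client for the /api/tts endpoint (sketch; assumes localhost:8080).
import requests

payload = {
    "voice": "f_cen_81",
    "type": "text",
    "text": "Bon dia!",
    "language": "ca-es",
}

response = requests.post("http://localhost:8080/api/tts", json=payload, timeout=120)
response.raise_for_status()

with open("tts.wav", "wb") as f:
    f.write(response.content)  # the endpoint streams back WAV audio
```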
+ +## License + +Mozilla Public License 2.0 + +## Project status + +- [x] Containerized +- [x] Improved endpoints +- [x] Improved models +- [x] Speed control +- [ ] Caching + +## Funding + +This work is funded by the [Generalitat de +Catalunya](https://politiquesdigitals.gencat.cat/ca/inici/index.html#googtrans(ca|en)) +within the framework of [Projecte AINA](https://politiquesdigitals.gencat.cat/ca/economia/catalonia-ai/aina). + +Generalitat logo diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..a8ee8c9 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +If you discover a security issue, please bring it to our attention right away! + +## Reporting a Vulnerability + +Please **DO NOT** file a public issue to report a security vulnerability; instead, send your report privately to **langtech@bsc.es**. This will help ensure that any vulnerabilities that are found can be [disclosed responsibly](https://en.wikipedia.org/wiki/Responsible_disclosure) to any affected parties. + +## Supported Versions + +Project versions that are currently being supported with security updates vary per project. +Please see specific project repositories for details. +If nothing is specified, only the latest major versions are supported. \ No newline at end of file diff --git a/__pycache__/main.cpython-310.pyc b/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..feea6255c357618f85f5ad67887739e1512a62d3 Binary files /dev/null and b/__pycache__/main.cpython-310.pyc differ
diff --git a/__pycache__/main_alex.cpython-310.pyc b/__pycache__/main_alex.cpython-310.pyc new file mode 100644 Binary files /dev/null and b/__pycache__/main_alex.cpython-310.pyc differ diff --git a/charts/aina-tts-api/Chart.yaml b/charts/aina-tts-api/Chart.yaml new file mode 100644 index 0000000..51a3af3 --- /dev/null +++ b/charts/aina-tts-api/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: aina-tts-api +version: 0.1.0 +description: RestFUL api and web interface to serve coqui TTS models + +home: https://github.com/projecte-aina/tts-api +keywords: + - ai + - tts + - coqui + - cat +maintainers: + - name: Projecte Aina + email: aina@bsc.es +sources: + - https://github.com/projecte-aina/tts-api diff --git a/charts/aina-tts-api/templates/deployment.yaml b/charts/aina-tts-api/templates/deployment.yaml new file mode 100644 index 0000000..ad03f62 --- /dev/null +++ b/charts/aina-tts-api/templates/deployment.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tts-api + namespace: {{.Values.global.namespace}} + labels: + component: tts-api +spec: + selector: + matchLabels: + component: tts-api + template: + metadata: + labels: + component: tts-api + spec: + volumes: + - name: dshm + emptyDir: + medium: Memory + sizeLimit: {{.Values.api.dshm_size | default "2Gi" | quote }} + containers: + - name: api + image: {{.Values.api.image}}:{{.Values.api.tag}} + imagePullPolicy: {{ default "IfNotPresent" .Values.pullPolicy }} + ports: + - containerPort: 8000 + env: + - name: "SPEECH_SPEED" + value: {{.Values.api.speech_speed | default "1.0" | quote }} + - name: "MP_WORKERS" + value: {{.Values.api.mp_workers | default "4" | quote }} + volumeMounts: + - mountPath: /dev/shm + name: dshm \ No newline at end of file diff --git a/charts/aina-tts-api/templates/service.yaml b/charts/aina-tts-api/templates/service.yaml new file mode 100644 index 0000000..01476ec --- /dev/null +++ b/charts/aina-tts-api/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: tts-api + namespace: {{.Values.global.namespace}} + labels: + component: tts-api +spec: + ports: + - name: api + protocol: TCP + port: 8000 + targetPort: 8000 + selector: + component: tts-api diff --git a/charts/aina-tts-api/values.yaml b/charts/aina-tts-api/values.yaml new file mode 100644 index 0000000..3f49414 --- /dev/null +++ b/charts/aina-tts-api/values.yaml @@ -0,0 +1,6 @@ +global: + namespace: apps + +api: + image: projecteaina/tts-api + tag: latest diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml new file mode 100644 index 0000000..bab2de2 --- /dev/null +++ 
b/docker-compose-dev.yml @@ -0,0 +1,18 @@ +version: '3.9' +services: + server: + build: + context: . + dockerfile: Dockerfile.dev + container_name: fastapi-dev + environment: + - SPEECH_SPEED=${SPEECH_SPEED-1.0} + - MP_WORKERS=${MP_WORKERS-4} + - USE_MP=False + restart: always + volumes: + - .:/app + + ports: + - '8080:8000' + shm_size: ${SHM_SIZE-2gb} diff --git a/docker-compose-gpu.yml b/docker-compose-gpu.yml new file mode 100644 index 0000000..c434532 --- /dev/null +++ b/docker-compose-gpu.yml @@ -0,0 +1,21 @@ +version: '3.9' +services: + server: + build: + context: . + environment: + - SPEECH_SPEED=${SPEECH_SPEED} + - MP_WORKERS=${MP_WORKERS} + - USE_CUDA=True + - USE_MP=${USE_MP} + restart: unless-stopped + ports: + - '8080:8000' + shm_size: ${SHM_SIZE} + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] \ No newline at end of file diff --git a/docker-compose-test.yml b/docker-compose-test.yml new file mode 100644 index 0000000..139c009 --- /dev/null +++ b/docker-compose-test.yml @@ -0,0 +1,14 @@ +version: '3.9' +services: + server: + build: + context: . + dockerfile: Dockerfile.test + container_name: fastapi-test + restart: always + volumes: + - .:/app + + ports: + - '8080:8000' + shm_size: ${SHM_SIZE-2gb} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..59a5fea --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,13 @@ +version: '3.9' +services: + server: + build: + context: . + environment: + - SPEECH_SPEED=${SPEECH_SPEED} + - MP_WORKERS=${MP_WORKERS} + - USE_MP=${USE_MP} + restart: unless-stopped + ports: + - '8080:8000' + shm_size: ${SHM_SIZE} diff --git a/infer_wavenext_onnx.py b/infer_wavenext_onnx.py new file mode 100644 index 0000000..83490dd --- /dev/null +++ b/infer_wavenext_onnx.py @@ -0,0 +1,170 @@ +import argparse +import os +import warnings +from pathlib import Path +from time import perf_counter + +import numpy as np +import onnxruntime as ort +import soundfile as sf +import torch + +from text import text_to_sequence, sequence_to_text + + +def intersperse(lst, item): + # Adds blank symbol + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + +def process_text(i: int, text: str, device: torch.device): + print(f"[{i}] - Input text: {text}") + x = torch.tensor( + intersperse(text_to_sequence(text, ["catalan_cleaners"]), 0), + dtype=torch.long, + device=device, + )[None] + x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device=device) + x_phones = sequence_to_text(x.squeeze(0).tolist()) + print(f"[{i}] - Phonetised text: {x_phones[1::2]}") + + return {"x_orig": text, "x": x, "x_lengths": x_lengths, "x_phones": x_phones} + + +def validate_args(args): + assert ( + args.text or args.file + ), "Either text or file must be provided Matcha-T(ea)TTS need sometext to whisk the waveforms." 
+    assert args.temperature >= 0, "Sampling temperature cannot be negative"
+    assert args.speaking_rate > 0, "Speaking rate must be greater than 0"
+    return args
+
+
+def write_wavs(model, inputs, output_dir, external_vocoder=None):
+    if external_vocoder is None:
+        print("The provided model has the vocoder embedded in the graph.\nGenerating waveform directly")
+        t0 = perf_counter()
+        wavs, wav_lengths = model.run(None, inputs)
+        infer_secs = perf_counter() - t0
+        mel_infer_secs = vocoder_infer_secs = None
+    else:
+        print("[🍵] Generating mel using Matcha")
+        mel_t0 = perf_counter()
+        mels, mel_lengths = model.run(None, inputs)
+        mel_infer_secs = perf_counter() - mel_t0
+        print("Generating waveform from mel using external vocoder")
+        vocoder_inputs = {external_vocoder.get_inputs()[0].name: mels}
+        vocoder_t0 = perf_counter()
+        wavs = external_vocoder.run(None, vocoder_inputs)[0]
+        vocoder_infer_secs = perf_counter() - vocoder_t0
+        wavs = wavs.squeeze(1)
+        wav_lengths = mel_lengths * 256
+        infer_secs = mel_infer_secs + vocoder_infer_secs
+
+    output_dir = Path(output_dir)
+    output_dir.mkdir(parents=True, exist_ok=True)
+    for i, (wav, wav_length) in enumerate(zip(wavs, wav_lengths)):
+        output_filename = output_dir.joinpath(f"output_{i + 1}.wav")
+        audio = wav[:wav_length]
+        print(f"Writing audio to {output_filename}")
+        sf.write(output_filename, audio, 22050, "PCM_24")
+
+    wav_secs = wav_lengths.sum() / 22050
+    print(f"Inference seconds: {infer_secs}")
+    print(f"Generated wav seconds: {wav_secs}")
+    rtf = infer_secs / wav_secs
+    if mel_infer_secs is not None:
+        mel_rtf = mel_infer_secs / wav_secs
+        print(f"Matcha RTF: {mel_rtf}")
+    if vocoder_infer_secs is not None:
+        vocoder_rtf = vocoder_infer_secs / wav_secs
+        print(f"Vocoder RTF: {vocoder_rtf}")
+    print(f"Overall RTF: {rtf}")
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description=" 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching"
+    )
+    parser.add_argument(
+        "model",
+        type=str,
+        help="ONNX model to use",
+    )
+    parser.add_argument("--vocoder", type=str, default=None, help="Vocoder to use (defaults to None)")
+    parser.add_argument("--text", type=str, default=None, help="Text to synthesize")
+    parser.add_argument("--file", type=str, default=None, help="Text file to synthesize")
+    parser.add_argument("--spk", type=int, default=None, help="Speaker ID")
+    parser.add_argument(
+        "--temperature",
+        type=float,
+        default=0.667,
+        help="Variance of the x0 noise (default: 0.667)",
+    )
+    parser.add_argument(
+        "--speaking-rate",
+        type=float,
+        default=1.0,
+        help="change the speaking rate, a higher value means slower speaking rate (default: 1.0)",
+    )
+    parser.add_argument("--gpu", action="store_true", help="Use GPU for inference (default: CPU)")
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        default=os.getcwd(),
+        help="Output folder to save results (default: current dir)",
+    )
+
+    args = parser.parse_args()
+    args = validate_args(args)
+
+    if args.gpu:
+        # ONNX Runtime's GPU provider is named CUDAExecutionProvider
+        providers = ["CUDAExecutionProvider"]
+    else:
+        providers = ["CPUExecutionProvider"]
+    model = ort.InferenceSession(args.model, providers=providers)
+
+    model_inputs = model.get_inputs()
+    model_outputs = list(model.get_outputs())
+
+    if args.text:
+        text_lines = args.text.splitlines()
+    else:
+        with open(args.file, encoding="utf-8") as file:
+            text_lines = file.read().splitlines()
+
+    processed_lines = [process_text(0, line, "cpu") for line in text_lines]
+    x = [line["x"].squeeze() for line in processed_lines]
+    # Pad
+    x =
torch.nn.utils.rnn.pad_sequence(x, batch_first=True) + x = x.detach().cpu().numpy() + x_lengths = np.array([line["x_lengths"].item() for line in processed_lines], dtype=np.int64) + inputs = { + "x": x, + "x_lengths": x_lengths, + "scales": np.array([args.temperature, args.speaking_rate], dtype=np.float32), + } + is_multi_speaker = len(model_inputs) == 4 + if is_multi_speaker: + if args.spk is None: + args.spk = 0 + warn = "[!] Speaker ID not provided! Using speaker ID 0" + warnings.warn(warn, UserWarning) + inputs["spks"] = np.repeat(args.spk, x.shape[0]).astype(np.int64) + + has_vocoder_embedded = model_outputs[0].name == "wav" + if has_vocoder_embedded: + write_wavs(model, inputs, args.output_dir) + elif args.vocoder: + external_vocoder = ort.InferenceSession(args.vocoder, providers=providers) + write_wavs(model, inputs, args.output_dir, external_vocoder=external_vocoder) + else: + warn = "[!] A vocoder is not embedded in the graph nor an external vocoder is provided. The mel output will be written as numpy arrays to `*.npy` files in the output directory" + warnings.warn(warn, UserWarning) + write_mels(model, inputs, args.output_dir) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/main.py b/main.py new file mode 100644 index 0000000..8ab7740 --- /dev/null +++ b/main.py @@ -0,0 +1,155 @@ +from pathlib import Path +from TTS.utils.manage import ModelManager +# from lingua_franca import load_language # Lingua franca + +import argparse +import uvicorn +import torch +import multiprocessing as mp +import sys +import os + +from server import create_app +from server.utils.argparse import MpWorkersAction +from server.utils.utils import update_config + +# Set global paths +# Determine the current script's directory and set up paths related to the model +path = Path(__file__).parent / "server" /".models.json" +path_dir = os.path.dirname(path) + +# Initialize the model manager with the aforementioned path +manager = ModelManager(path) + +# Set the relative paths for the default TTS model and its associated configuration +models_path_rel = '../models/vits_ca' +model_ca = os.path.join(path_dir, models_path_rel, 'best_model.pth') +config_ca = os.path.join(path_dir, models_path_rel, 'config.json') + +# Load lingua franca language +# load_language('ca-es') + + +def create_argparser(): + def convert_boolean(x): + return x.lower() in ["true", "1", "yes"] + + # Create an argument parser to handle command-line arguments + # The parser setup seems incomplete and might be continued in the next section of the code. + parser = argparse.ArgumentParser() + parser.add_argument( + "--list_models", + type=convert_boolean, + nargs="?", + const=True, + default=False, + help="list available pre-trained tts and vocoder models." + ) + parser.add_argument( + "--model_name", + type=str, + default="tts_models/en/ljspeech/tacotron2-DDC", + help="Name of one of the pre-trained tts models in format //", + ) + parser.add_argument("--vocoder_name", type=str, default=None, help="name of one of the released " + "vocoder models.") + # Args for running custom models + parser.add_argument( + "--config_path", + default=config_ca, + type=str, + help="Path to model config file." + ) + parser.add_argument( + "--model_path", + type=str, + default=model_ca, + help="Path to model file.", + ) + parser.add_argument( + "--vocoder_path", + type=str, + help="Path to vocoder model file. If it is not defined, model uses GL as vocoder. 
Please make sure that you " + "installed vocoder library before (WaveRNN).", + default=None, + ) + parser.add_argument("--vocoder_config_path", type=str, help="Path to vocoder model config file.", default=None) + parser.add_argument("--speakers_file_path", type=str, help="JSON file for multi-speaker model.", default=None) + parser.add_argument("--port", type=int, default=8000, help="port to listen on.") + parser.add_argument("--host", type=str, default="0.0.0.0", help="host ip to listen.") + parser.add_argument("--use_mp", type=convert_boolean, default=False, nargs='?', const=True, help="true to use Python multiprocessing.") + parser.add_argument("--use_cuda", type=convert_boolean, default=False, nargs='?', const=False, help="true to use CUDA.") + parser.add_argument("--mp_workers", action=MpWorkersAction, type=int, default=mp.cpu_count(), nargs='?', const=mp.cpu_count(), help="number of CPUs used for multiprocessing") + parser.add_argument("--debug", type=convert_boolean, default=False, help="true to enable Flask debug mode.") + parser.add_argument("--show_details", type=convert_boolean, default=False, help="Generate model detail page.") + parser.add_argument("--speech_speed", type=float, default=1.0, nargs='?', const=1.0, help="Change speech speed.") + parser.add_argument("--reload", type=bool, action=argparse.BooleanOptionalAction, default=False, help="Reload on changes") + return parser + + +# parse the args +args = create_argparser().parse_args() +print("args =========", args) +# update in-use models to the specified released models. +model_path = None +config_path = None +speakers_file_path = None +vocoder_path = None +vocoder_config_path = None +# new_speaker_ids = None +# use_aliases = None + +# CASE1: list pre-trained TTS models +if args.list_models: + manager.list_models() + sys.exit() + +# CASE2: load pre-trained model paths +if args.model_name is not None and not args.model_path: + model_path, config_path, model_item = manager.download_model(args.model_name) + args.vocoder_name = model_item["default_vocoder"] if args.vocoder_name is None else args.vocoder_name + +if args.vocoder_name is not None and not args.vocoder_path: + vocoder_path, vocoder_config_path, _ = manager.download_model(args.vocoder_name) + +# CASE3: set custom model paths +if args.model_path is not None: + model_path = args.model_path + config_path = args.config_path + speakers_file_path = args.speakers_file_path + speaker_ids_path = os.path.join(path_dir, models_path_rel, 'speaker_ids.json') + +if args.vocoder_path is not None: + vocoder_path = args.vocoder_path + vocoder_config_path = args.vocoder_config_path + +# CASE4: change speaker speed +if args.speech_speed != 1.0: + update_config(config_path, args.speech_speed) + + +app = create_app( + model_path = model_path, + config_path = config_path, + speakers_file_path = speakers_file_path, + vocoder_path = vocoder_path, + vocoder_config_path = vocoder_config_path, + speaker_ids_path = speaker_ids_path, + speech_speed = args.speech_speed, + mp_workers = args.mp_workers, + use_cuda = args.use_cuda, + use_mp = args.use_mp, + show_details=args.show_details, + args=args + ) + + +def main(): + uvicorn.run('main:app', host=args.host, port=args.port, reload=args.reload) + + +if __name__ == "__main__": + torch.set_num_threads(1) + torch.set_grad_enabled(False) + mp.set_start_method("fork") + main() diff --git a/main_alex.py b/main_alex.py new file mode 100644 index 0000000..af82e1c --- /dev/null +++ b/main_alex.py @@ -0,0 +1,111 @@ +import argparse +import uvicorn 
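
Both entry points lean on the same argparse idiom for boolean flags: type=convert_boolean combined with nargs='?' and const=True, so a flag can be given bare, given with a value, or omitted entirely. A small self-contained sketch of how that pattern resolves (names mirror main.py; the snippet is illustrative and not part of the service):

import argparse

def convert_boolean(x):
    return x.lower() in ["true", "1", "yes"]

parser = argparse.ArgumentParser()
parser.add_argument("--use_mp", type=convert_boolean, default=False, nargs="?", const=True)

print(parser.parse_args([]).use_mp)                  # False -> falls back to the default
print(parser.parse_args(["--use_mp"]).use_mp)        # True  -> bare flag uses const
print(parser.parse_args(["--use_mp", "no"]).use_mp)  # False -> value goes through convert_boolean

The same behaviour applies to --use_cuda, --use_mp and the other boolean options below.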
+import torch +import multiprocessing as mp +import os + +from server import create_app +from server.utils.argparse import MpWorkersAction + + +# Set the relative paths for the default TTS model and its associated configuration +models_path_rel = '/home/apeir1/PycharmProjects/tts-api/models/matxa_onnx' +# model_name = 'matcha_multispeaker_cat_opset_15_10_steps_2399.onnx' +model_name = 'matcha_wavenext_simply.onnx' +# model_name = 'matxa_vocos_merged_HF_simplified_dynamic.onnx' +vocoder_name = 'mel_spec_22khz.onnx' +spk_ids_file = 'spk_ids.json' + +model_ca = os.path.join(models_path_rel, model_name) +vocoder_ca = os.path.join(models_path_rel, vocoder_name) +ids_file_path = os.path.join(models_path_rel, spk_ids_file) + + +def create_argparser(): + def convert_boolean(x): + return x.lower() in ["true", "1", "yes"] + parser = argparse.ArgumentParser() + # Args for running custom models + parser.add_argument( + "--model_path", + type=str, + default=model_ca, + help="Path to ONNX model file.", + ) + parser.add_argument( + "--vocoder_path", + type=str, + help="Path to ONNX vocoder", + default=vocoder_ca, + ) + parser.add_argument("--speakers_file_path", type=str, help="JSON file for multi-speaker model.", + default=ids_file_path) + parser.add_argument("--unique_model", type=bool, help="set to True if the model is a TTS+Vocoder", + default=True) + parser.add_argument("--port", type=int, default=8000, help="port to listen on.") + parser.add_argument("--host", type=str, default="0.0.0.0", help="host ip to listen.") + parser.add_argument("--use_mp", type=convert_boolean, default=False, nargs='?', const=True, + help="true to use Python multi-processing.") + parser.add_argument("--use_mth", type=convert_boolean, default=True, nargs='?', const=True, + help="true to use Python multi-threading.") + parser.add_argument("--use_cuda", type=convert_boolean, default=False, nargs='?', const=False, + help="true to use CUDA.") + parser.add_argument("--mp_workers", action=MpWorkersAction, type=int, default=1, # mp.cpu_count() + nargs='?', const=1, help="number of CPUs used for multiprocessing") + parser.add_argument("--debug", type=convert_boolean, default=False, + help="true to enable Flask debug mode.") + parser.add_argument("--show_details", type=convert_boolean, default=False, + help="Generate model detail page.") + parser.add_argument("--speech_speed", type=float, default=0.9, nargs='?', const=1.0, + help="Change speech speed.") + parser.add_argument("--temperature", type=float, default=0.4, nargs='?', const=1.0, + help="Set temperature of inference.") + parser.add_argument("--reload", type=bool, action=argparse.BooleanOptionalAction, default=False, + help="Reload on changes") + return parser + + +# parse the args +args = create_argparser().parse_args() +print("args =========", args) +# update in-use models to the specified released models. 
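
For reference, the way these paths are consumed at inference time roughly follows scripts/inference_onnx.py and infer_wavenext_onnx.py: the sentence is cleaned, interspersed with blank tokens, and fed to the ONNX session together with scales = [temperature, speaking_rate]. A minimal single-sentence sketch under those assumptions; the model path, speaker index and output ordering below are illustrative, not guaranteed by this patch:

import numpy as np
import onnxruntime as ort
import soundfile as sf

from text import text_to_sequence


def intersperse(lst, item):
    # interleave the blank symbol between tokens, as the inference scripts do
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def synthesize_one(sentence, model_path="models/matxa_onnx/matcha_wavenext_simply.onnx",
                   spk=0, temperature=0.4, speaking_rate=0.9):
    ids = intersperse(text_to_sequence(sentence, ["catalan_cleaners"]), 0)
    x = np.array([ids], dtype=np.int64)
    inputs = {
        "x": x,
        "x_lengths": np.array([x.shape[-1]], dtype=np.int64),
        "scales": np.array([temperature, speaking_rate], dtype=np.float32),
        "spks": np.array([spk], dtype=np.int64),  # only passed for multi-speaker exports
    }
    sess = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    # output ordering assumed to match scripts/inference_onnx.py: (frame count, waveform)
    num_frames, wav = sess.run(None, inputs)
    sf.write("sample.wav", wav.squeeze()[: int(num_frames[0]) * 256], 22050, "PCM_24")
    return wav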
+model_path = None +config_path = None +speakers_file_path = None +vocoder_path = None +vocoder_config_path = None +new_speaker_ids = None +use_aliases = None + +# CASE3: set custom model paths +if args.model_path is not None: + model_path = args.model_path + speakers_file_path = args.speakers_file_path + speaker_ids_path = os.path.join(models_path_rel, 'spk_ids.json') + +if args.vocoder_path is not None: + vocoder_path = args.vocoder_path + + +app = create_app( + model_path=model_path, + vocoder_path=vocoder_path, + speaker_ids_path=speaker_ids_path, + speech_speed=args.speech_speed, + temperature=args.temperature, + mp_workers=args.mp_workers, + use_cuda=args.use_cuda, + use_mp=args.use_mp, + args=args + ) + + +def main(): + uvicorn.run('main_alex:app', host=args.host, port=args.port, reload=args.reload) + + +if __name__ == "__main__": + torch.set_num_threads(1) + torch.set_grad_enabled(False) + mp.set_start_method("fork") + main() diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..f491aa2 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,9 @@ +# tts==0.17.6 + +numpy +scipy +soundfile +torch +onnxruntime +fastapi==0.103.2 +uvicorn[standard] diff --git a/scripts/__pycache__/inference_onnx.cpython-310.pyc b/scripts/__pycache__/inference_onnx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9935f7ecd87d073f0cdac948b833f357ae500ead GIT binary patch literal 6077 zcmcIoTaP1074Ey+ZF|Nu!FT1s{|L_%VL1PBl;oU#J&^kdXF?yn+PbJJoHEcN8A6)zwv}PMx}) z`szEKHX4C}-{|i@=sC|A#^0zg|Er?#DLnE|NSMLQzA-R&Oq23f-x}CEw$9sqXUB!i z?t42P-cG+V@OOM&=k}|EU?(W`YCCnU*Vt(=k5wKRI}6NbRlFD377JMIfw|LUb=H7n ziP`us{LEsDyuQE8ntX*V@fKTVcF$)kto6X#Sw*Ww9`M$tq3fy6W2f2DBHukKIEd;>87J3<6;|v%4be8E^od@I$w%q)-|a` z`Lpn*=M_AXA<2z>Lk37q8GHdtbl?20iIT;v16QsL=wZ&;%twLFyqHrn2N9$_jx!N$t=9b!=XqIW5!wd4!lU+`JGaAxLnzY$*UhptJlc!pK?qCXQFO?AsTjzsf+l$S;oH$**pHK4bTPo1 zMA=C2up1>ws{2UzZj{A`{Pfw|LmnNFB6QBjuq!x<7~o!%gx8gOn08aCDtmD+-i?ye zkKOErC+R30NghH0v*03$`XQ+?E}jdAeI7~vTquXU8~09Vq}{zRO?bPa92&lIXwIrO zH$2sVwjN7|#-i$_wayTGdtOSS0atE6J>sHmDJK%UQu$ErP{q53Si=1O_0qLHtkbn< z$Yb%swOc1xM=`i1(tX~|dfzdf_bto$&$7L2dS+m5SdJ-HP}#MSOn*-yQ^BKM>sdG%Gske6v6I_l zw`%0(gwCCv8?kw2y=jlVab@i1US45N?zfC}qh*Y%nK`j?_skpzxg~yxl8wj7jWZLn z+RR36;+`4gc%`lSxG`SH>v;pV7D`)-Xj##%tGaa)c|UJXstoog0rRwUX{NQP%gb78 z`GVF8tBhOuQofS6TE>PkUWHaIUuFK8rStW?$*P+M(qPkoYz_5|oU|75wKKD2==@@l zM}0MSb7#X~?j>Wq4lO6g7$L3hH?cAsCyVeBUyS%-mnzU&*VvP1wQ~9y;T# zd@B#Ip7?*yVn)y2r1jg%FR}WDkvr(oT{p&;(XzF_y1$lhv3h>FXCZZU>L6Ww=#=f7 zteLOp>owTwIhzLdC77&Zg*Tvo2`gn!Hqo<-kv}oB_{0ZWoLo9HrnX?|$^1#Sbl^<3 z&Ws3l_Viv zCut02H|bC(!CpG-^m(!ipb9YOY2F@0yUILP=838i6qZ||ymDQ9*ap~P06sK3_u@z@ z@3?%Q6z}F-!$RVaq-ddYRZ5Gh2AMf8`q-#qR7QpeIILPv*ts9c3#Cw*Vxh~Ky}m5P&ax8oAkzezkK+|yWy)mA(BEo1V;kA zf^r4_Iel@ia>{eUo=69Csuvl^owg}H3G4-xv50Qk)7;0h}@r`LZd zc{fd%yb=Cuz3dcNNsVU}r}bh~;aBc_{ziD(c)ege)8cD~m`1-}6a`U+(`RmmoY*KV z5mTF;k#Gu1Pd|OSbv}eF9+1*`kMCNlG6Ys78F&sq28ZpW2O4_YRdJo>NW4Q3;m~J6 zT%%l@l8;eB_l0r?2P_utHDn0Llouy}cBz6A<58)a(?y)YoJa7KT^AoG69E-Uj9Z|* zo=FEQ12ZKsLRr~ZFSR@s%qyW{Y9N@C`y|(9Y?1X;g=|x%Hnl+okCb4&iUD_uIW!AQ zp0l)wyQDzB9xgmCRkcuofs2S$JrXh49jppWPJRW6Z&{{a^{ggHoo{;90=+FC53mTO z1xV>%ys2EbeA@#~P4#@_eQ2T;(xxR2VfHavSckyFqp|4^$YchP2sChF&XC9%yCltR zoJ%jmIVw++%}o3)I!&3A17ALC++D{hCE{pJT#!kS$903wBc1!qn^b@TJ`ube)XX&A zdMbCB*D@y65|e@~0490U6O2+d52huM(Ez8s25FGjrcy>{x3+yp+{NHkrF5Ud)4i+u zF4t#H?4Vw}LCKp)PF;Oo+O;`nCC3!sK%%M~4e9)IG))bMYd0@rUsc;lv0;3AM_hDT(h=))O4T zSVAM#Kf1aoQlR+!i+ChCO^z!I@M?h#;}Sv?pgI42Ym0cTo4XVV>^sP@XM}w^ zt_%@vMn#xfVDs&6JUqD?1R;K(Mu>Dn=ct4x;gw`~HDQqu(MkJ8__e7K+Q-r`;;Rvt 
z1azQ3yF7g3`4^rGU${&C(4JqNnWKdZuYkwE&@IaAkraNX2Xw9~MR*1D@3}A?7Gw`i zW^u3%ZxOGu5OFf~p@ZX8V={%JSHsWpUNq`wl6u%isCsUAx|iXV@@v9lU3|VDAp#PQj*XwJf{$6AxeO$1nro)`F=`&e zJsW}q>RgTlp?Zy;J``ukJ|wf{g% zNuigIW9K0ZBEr%Hb@46nTZp@KxlVty1+#6qfkL?r79A>V1EqFuVjC8D&+ZFFn{#TUzni86y_zfi&F4)#%lj)3-7%?|} z^w4lp&Iw^3%?Tmi?_i>BZnrmy31@r#_+G(k9U{w`QQ%_*zTDySgO*IF%aD>EP@>V& z1AYSX1ucX=4%+#}_o+xc@$|h$ZRMSqbmspQ#Zen_hIvVyr nfvq3)`Ae9PAwwi}gNPyMp;f$n*KY)0^= 0, "Sampling temperature cannot be negative" + assert args.speaking_rate >= 0, "Speaking rate must be greater than 0" + if args.vocoder_path: + voc_name = args.vocoder_name.lower() + assert ( + voc_name == 'vocos' or voc_name == 'hifigan'), "If you use an external vocoder, please, specify which one" + return args + + +def vocos_inference(mel, model_vocoder, denoise): + # sample_rate = 22050 + n_fft = 1024 + hop_length = 256 + win_length = n_fft + ''' + input_info = model_vocoder.get_inputs() + for input in input_info: + print("Name:", input.name) + print("Shape:", input.shape) + print("Type:", input.type) + print("-" * 20) + ''' + # ONNX inference + mag, x, y = model_vocoder.run( + None, + { + "mels": mel # mel['mels'] + }, + ) + + # complex spectrogram from vocos output + spectrogram = mag * (x + 1j * y) + window = torch.hann_window(win_length) + + if denoise: + # Vocoder bias + mel_rand = torch.zeros_like(torch.tensor(mel)) + mag_bias, x_bias, y_bias = model_vocoder.run( + None, + { + "mels": mel_rand.float().numpy() + }, + ) + + # complex spectrogram from vocos output + spectrogram_bias = mag_bias * (x_bias + 1j * y_bias) + + # Denoising + spec = torch.view_as_real(torch.tensor(spectrogram)) + # get magnitude of vocos spectrogram + mag_spec = torch.sqrt(spec.pow(2).sum(-1)) + + # get magnitude of bias spectrogram + spec_bias = torch.view_as_real(torch.tensor(spectrogram_bias)) + mag_spec_bias = torch.sqrt(spec_bias.pow(2).sum(-1)) + + # substract + strength = 0.0025 + mag_spec_denoised = mag_spec - mag_spec_bias * strength + mag_spec_denoised = torch.clamp(mag_spec_denoised, 0.0) + + # return to complex spectrogram from magnitude + angle = torch.atan2(spec[..., -1], spec[..., 0]) + spectrogram = torch.complex(mag_spec_denoised * torch.cos(angle), mag_spec_denoised * torch.sin(angle)) + + # Inverse stft + pad = (win_length - hop_length) // 2 + spectrogram = torch.tensor(spectrogram) + B, N, T = spectrogram.shape + + print("Spectrogram synthesized shape", spectrogram.shape) + # Inverse FFT + ifft = torch.fft.irfft(spectrogram, n_fft, dim=1, norm="backward") + ifft = ifft * window[None, :, None] + + # Overlap and Add + output_size = (T - 1) * hop_length + win_length + y = torch.nn.functional.fold( + ifft, output_size=(1, output_size), kernel_size=(1, win_length), stride=(1, hop_length), + )[:, 0, 0, pad:-pad] + + # Window envelope + window_sq = window.square().expand(1, T, -1).transpose(1, 2) + window_envelope = torch.nn.functional.fold( + window_sq, output_size=(1, output_size), kernel_size=(1, win_length), stride=(1, hop_length), + ).squeeze()[pad:-pad] + + # Normalize + assert (window_envelope > 1e-11).all() + y = y / window_envelope + + return y + + +def write_wav(model, inputs, output_dir, external_vocoder=None): + + print("[🍵] Generating mel using Matcha") + ''' + input_info =model.get_inputs() + for input in input_info: + print("Name:", input.name) + print("Shape:", input.shape) + print("Type:", input.type) + print("-" * 20) + ''' + + if external_vocoder is not None: + + mel_t0 = perf_counter() + mel, mel_length = model.run(None, inputs) + mel_infer_secs = 
perf_counter() - mel_t0 + print("Generating waveform from mel using external vocoder") + + vocoder_t0 = perf_counter() + wav = vocos_inference(mel, external_vocoder, denoise=True) + vocoder_infer_secs = perf_counter() - vocoder_t0 + + wav_length = mel_length * 256 + infer_secs = mel_infer_secs + vocoder_infer_secs + + print("wav length tensor shape") + print(wav_length.shape) + + wav_secs = wav_length.sum() / 22050 + print(f"Inference seconds: {infer_secs}") + print(f"Generated wav seconds: {wav_secs}") + rtf = infer_secs / wav_secs + if mel_infer_secs is not None: + mel_rtf = mel_infer_secs / wav_secs + print(f"Matcha RTF: {mel_rtf}") + if vocoder_infer_secs is not None: + vocoder_rtf = vocoder_infer_secs / wav_secs + print(f"Vocoder RTF: {vocoder_rtf}") + print(f"Overall RTF: {rtf}") + + wav = wav.squeeze(1) + + else: + print("I entered the inference function!!") + wav_t0 = perf_counter() + out_model = model.run(None, inputs) # the tensor array with audio values + model_infer_secs = perf_counter() - wav_t0 + + print("Inference time in seconds: ") + print(model_infer_secs) + + num_spec_frames, wav = out_model + wav_length = num_spec_frames * 256 + wav_secs = wav_length.sum() / 22050 + + model_rtf = model_infer_secs / wav_secs + print(f"Overall RTF: {model_rtf}") + + wav = wav.squeeze() + + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + return wav[:wav_length[0]] + + +def write_mels(model, inputs, output_dir): + t0 = perf_counter() + mels, mel_lengths = model.run(None, inputs) + infer_secs = perf_counter() - t0 + + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + for i, mel in enumerate(mels): + output_stem = output_dir.joinpath(f"output_{i + 1}") + # plot_spectrogram_to_numpy(mel.squeeze(), output_stem.with_suffix(".png")) + np.save(output_stem.with_suffix(".numpy"), mel) + + wav_secs = (mel_lengths * 256).sum() / 22050 + print(f"Inference seconds: {infer_secs}") + print(f"Generated wav seconds: {wav_secs}") + rtf = infer_secs / wav_secs + print(f"RTF: {rtf}") + + +# taken from: https://github.com/coqui-ai/TTS/tree/dev +def save_wav_scipy(*, wav: np.ndarray, path: str, sample_rate: int = None, pipe_out=None, **kwargs) -> None: + """Save float waveform to a file using Scipy. + + Args: + wav (np.ndarray): Waveform with float values in range [-1, 1] to save. + path (str): Path to a output file. + sr (int, optional): Sampling rate used for saving to the file. Defaults to None. + pipe_out (BytesIO, optional): Flag to stdout the generated TTS wav file for shell pipe. + """ + wav_norm = wav * (32767 / max(0.01, np.max(np.abs(wav)))) + + wav_norm = wav_norm.astype(np.int16) + + if pipe_out: + wav_buffer = BytesIO() + scipy.io.wavfile.write(wav_buffer, sample_rate, wav_norm) + wav_buffer.seek(0) + pipe_out.buffer.write(wav_buffer.read()) + scipy.io.wavfile.write(path, sample_rate, wav_norm) + + +# taken from: https://github.com/coqui-ai/TTS/tree/dev +def save_wav(wav: List[int], path: str, pipe_out=None) -> None: + """Save the waveform as a file. + + Args: + wav (List[int]): waveform as a list of values. + path (str): output path to save the waveform. + pipe_out (BytesIO, optional): Flag to stdout the generated TTS wav file for shell pipe. 
+ """ + # if tensor convert to numpy + if torch.is_tensor(wav): + wav = wav.cpu().numpy() + if isinstance(wav, list): + wav = np.array(wav) + save_wav_scipy(wav=wav, path=path, sample_rate=22050, pipe_out=pipe_out) + + +def load_onnx_tts(model_path, vocoder_path, use_cuda): + + s_opts = ort.SessionOptions() + s_opts.intra_op_num_threads = 2 + s_opts.inter_op_num_threads = 1 + + if use_cuda: + providers = ["GPUExecutionProvider"] + else: + providers = ["CPUExecutionProvider"] + + model_tts = ort.InferenceSession(model_path, s_opts, providers=providers) + vocoder_tts = ort.InferenceSession(vocoder_path, s_opts, providers=providers) + + return model_tts, vocoder_tts + + +def load_onnx_tts_unique(model_path, use_cuda): + + s_opts = ort.SessionOptions() + s_opts.intra_op_num_threads = 1 # amb 8 varios cops tira a 0.29 segons / amb 1 varios cops sobre 0.60 segons + s_opts.inter_op_num_threads = 1 + # s_opts.intra_op_num_threads = 3 # total number of CPU's + + if use_cuda: + providers = ["GPUExecutionProvider"] + else: + providers = ["CPUExecutionProvider"] + + model_tts = ort.InferenceSession(model_path, s_opts, providers=providers) + + return model_tts diff --git a/scripts/speakers_conversion.csv b/scripts/speakers_conversion.csv new file mode 100644 index 0000000..dc9ef91 --- /dev/null +++ b/scripts/speakers_conversion.csv @@ -0,0 +1,8 @@ +caf_09598,f_cen_095 +caf_09204,f_cen_092 +0727cbbacecb12625985f110ce26f03ded913c0e099740f967ba193c1ffd15a00b9a8b50fe94a4d9c3794c5e12d69d9923577ee216ae44fcf7633c9ca215723c,m_occ_072 +pau,m_cen_pau +7b7593f44cc6f9f7b21495bca6f3d564f73f36b97ee15d51a783da8141463834022996c55e494800d21304079aefa8a5fe64350c9273e0d36453b097b2dcc5f4,m_occ_7b7 +51c11eccd83cfec93c59dae87eddb5722fc9e5534b09636a3ce36a18e74762f728b3c5cd1046a5f58b646bc8897c3eabcf14fe943b7697f08cc583c72890f012,m_val_51c +caf_06311,f_cen_063 +caf_05147,f_cen_051 diff --git a/server/__init__.py b/server/__init__.py new file mode 100644 index 0000000..244b868 --- /dev/null +++ b/server/__init__.py @@ -0,0 +1,34 @@ +from fastapi import FastAPI + +from server.helper.config import ConfigONNX +from server.exceptions import LanguageException, SpeakerException +from server.exception_handler import language_exception_handler, speaker_exception_handler +from server.views.health import health +from server.views.api.api import route + + +def create_app(model_path, vocoder_path, speaker_ids_path, speech_speed, temperature, mp_workers, + use_cuda, use_mp, args) -> FastAPI: + + app = FastAPI() + + @app.on_event("startup") + async def startup_event(): + config = ConfigONNX( + model_path=model_path, + vocoder_path=vocoder_path, + speaker_ids_path=speaker_ids_path, + speech_speed=speech_speed, + temperature=temperature, + mp_workers=mp_workers, + use_cuda=use_cuda, + use_mp=use_mp, + unique_model=args.unique_model + ) + + app.add_exception_handler(SpeakerException, speaker_exception_handler) + app.add_exception_handler(LanguageException, language_exception_handler) + app.include_router(health) + app.include_router(route) + + return app diff --git a/server/__pycache__/__init__.cpython-310.pyc b/server/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05bc450d7d59647ad5210353c40a7bef12fb08c6 GIT binary patch literal 1253 zcmZWoON-P%5Keb8c}-@Wb=T)X(c2uH!IKveMHB=X7raPNLYwsLMDwz{vp#S|*@Xnb zlNXQf(ZAGJPyPbIi&iDG>#IY)RCQJA@pV@@8uj_-%I0qlY|_Q#%M@G8?&*RL1Q;g<1V~}Cg#4oM|OSiqRLd(0VPH! 
z50t%<9pxSXxv=Xfua|%nYpxxsm{4^UL$0GmK*CBs!YeNVyd%nJN$a*|Yqf)xwsz~Z z#uKRNgSQJu2C?XT;%PrG6P`sCQ%fD}lrdx5|=rM%KXU8K)y5HZ(;3gk*`0wBq8ccfoDZ85PeMS!+-gf)a? z2xElf2pb3^fG@_6E3;*p^BJpnDlX5qUdBr%@-0z5;jxl4rQ{`6r8CLJ4i~d1N{duQ z(X@K0M=J&+zQYTJ8RTgI`j%2k{(date}8o$7Mv*_v8tM0&B~Z%^4j#T7!zMmh@joF z=!5DLEJ^;qRAK;L!a;x$GlY#lt))er)d`QfOD2Si7QAAUVb2Ek`!5?EL%%6Gg@H^c zP@F|)Y)70#ID-cNcLR7;CGu&TE$V6N|Iv*&NR5~&G{fR4$S1n^LklC__|058Ku%m4rY literal 0 HcmV?d00001 diff --git a/server/__pycache__/exception_handler.cpython-310.pyc b/server/__pycache__/exception_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c31a5a3d82cf1c4b32922f33bddd3758916cd7b GIT binary patch literal 1043 zcmZ`&J8u**5VpO~djusRN=N|d+C$<_P$LwCXb`$YNrO(H)v}(;?PXusK1iYE3e=ZB4pb)51U z8csM(NJ@v48lNRBwT4#dTZx@ILnn2IZn?G-FZG8$`VLs!1!rV)kNfZKVWUP|L>jz_ zNRzj=NwJF3VLna~M)!<5D8A)VM? zu*`4lka263aQiFc&PRr1-NDIcO31-LD7zJ)3I_S3aRqVV-Kq=^n`k~7zyEh7PuZYcO zBzmFJzju1Q6y`$;^KA7}y*Y-Yz>ZX%u1YdWm&wi%E~%HS{d;N7rwQE07hI8SlQx}a KZMtYJH~s>_3KoF? literal 0 HcmV?d00001 diff --git a/server/__pycache__/exceptions.cpython-310.pyc b/server/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6723aa38e2010f5caf4b8560ded8eab64b0ddac4 GIT binary patch literal 725 zcmZ`$%}&BV5Z>*N7L){t7dd=?nN6yiyNd zd;~msa%TC*Fv(2!`*vpMn`u%i6&c#k=R@ZUzuyEl%|g&XS7#XRSi~a!fiBs*>2t(J zWFj^-HX^IdTsy?fEi(}{&}TIalKDJhzJYI;9Yfb!zBRAR%k5MKPZDllLz!lAGR%u= z&WJb)4J=<8oyl*|XEefuPnbY`Z8qgimjg9*joebQ-_c;o;1IdrORtv z6m;c!mrj2{?cF3$53uP44psUXa6lBDcZHE++9=Ta$F+a`)_Z`X?Q&hEhpXU)ZcwHS gLQU7q3oFffR?Tm8UzO+1)qJJbG_)e|Wp-l!0h@-D`2YX_ literal 0 HcmV?d00001 diff --git a/server/audio_utils/__init__.py b/server/audio_utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/audio_utils/__pycache__/__init__.cpython-310.pyc b/server/audio_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67224a736bef5abef6e1a4b1413e0ba4fac7d978 GIT binary patch literal 160 zcmd1j<>g`kf-e(8(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!Hsenx(7s(xZY zYG#q4en4e%Mq*KJKv8~HYH~@jeo0BOZel^EesOA1S!$7fVrfcdetc<3W=^qwe0*kJ dW=VX!UP0w84x8Nkl+v73JCNbUOhAH#0Ra31Cd>c; literal 0 HcmV?d00001 diff --git a/server/audio_utils/__pycache__/audio_utils.cpython-310.pyc b/server/audio_utils/__pycache__/audio_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91ffc358dc45e07d67b7c64185cb12edb9512104 GIT binary patch literal 2507 zcmZWr&2J>d6|buPoSB{vd%ciVR*8WKGze@VgpgK1!lzgy%ti(QBT%c|RbIE<(>+^N zZLdj<1TohY*GLi4%rSpMj=AJ-C~@PIQ*zeYL%Uwys$)vvnl_qzn1 z_y76%;fF&)enjK?TEV)uJ$G#)U5QU!6cv<p%g9nXhgzcP( z1LZo(8IHbsFkv2)e?d_nfcS=Xh#?EIBzGYx?~^$*kg$fxAt(F9&;>PYN!xD}ZT2|Q zGmuh<`~yZlU}L&B_N;ebXxmky;_1P;7Rp9a+?)! 
z|9}zU%g^ua9@SH^o6STaU)sHQo*!j$3i&w}xmLScs~59bv8#kU6>_)jrO`#HuIoFq zbK9Mj*?F6i0rac3K(MbF9kAH*>Bt)dG4m<=uiStSIi_U&^g+R?xD@>xly9ge7i130 zJ*IOXB%22yy?F@IpGP2r<8U79Xb~^TyrVl8U%oK!nckuc{+aCN$Bqx7w7%u*JY6R~zx*cb~#``S4zNlvMFc zM168bDZ%%1C>YPlRXlFdIM|aE)?<6B#Z_8xWrvkGORqb6jS^{A7MT)iJd`1}=uB%a z$`sOXqtiMEL)ruyiTnuL3T}H^OlLx7x{<;@x$3XQ>YG~2;-En=#A(WlT&JmxGId_% zfY$*4CKacm(rH=OGl^)mfr2J8>y@IiL)lbmQKe#aU~GI}R5+5b-mKB%zU=_qw5Z?+ z0rDO%Z4I*aJcPMBH}WQQFQ33M`YyDn?x)z7{0;hUt(&*eX?8VrA7^Ldt68iEwGjlv z@Y(^m6(j5nR@Tn5h2b;cQ?G$wUx#%4#x!P7;7usN(Y@hHwl$;OpGJLv?*DFIZrzYC zKnFRVq4k49q5U=#^#ceEG-$}*$@lC600A(9GrT0HRBmc!Le3u1zXG7Sx9|)A1Y^MR z!3tZ;1oAn6V|WXoTtN!}w7&?J!(kZw3cg=nhcApWFg-WQa4|B2C7ln=5Fi}weRXri zmauc%>pi%$J*E;9{MF|68`~cuShtG`&~az`FLUSHZ*j4Gmp??`nAuQg63NPkN?9fk z#v3-6$)W<+i8xm>fc|WFSj%aqZH)LnEJ|Uc>4`$avRc_7FKZwp4@3L;QBm@A){;45 zIFr!Vaa~l_uV(;dAIaZ_QcUYpVHu!5dJ9SN?nmR1d>;Ccnd39`+PSq6&M7_Qw!fZfT4hsV>C6VsW8=EfDRQm#K>Gz+&F78Hf^7_fwOfhz~p zhuUX}hn7#{ON1+0w=cVX7d)lIyTQ&H`S0>N+qSk9m9Jz$L^6j+bnI#Yw3%U4~q)EIT`Zdk1GH@1S@O zgpE~pD$=v;blj0|qSs~qkEmT`{wmczL@J4^`DMyp1)qST5)hb-@G^mvJ$)(h%lM}J zEmZPl6tAFo9faKkw#XYPfiQLs8>CCsa^7&-l)6}<2f*EeY70?R3a!P?;~QN`yvFTD zdkgM}_3yz7vRfIagr}no&?W}5-AshS|{~X+f=xD}mty{{qh2zaA k9lgb`LAIMxyn%O$x(y;GYbr~C&Ju=l%ZqQuzfYd|Ka^^OV*mgE literal 0 HcmV?d00001 diff --git a/server/audio_utils/audio_utils.py b/server/audio_utils/audio_utils.py new file mode 100644 index 0000000..f5b32dd --- /dev/null +++ b/server/audio_utils/audio_utils.py @@ -0,0 +1,106 @@ +import asyncio +import tempfile +import os +import io +from concurrent.futures import ThreadPoolExecutor +from multiprocessing import Process +from starlette.websockets import WebSocket + +from server.exceptions import SpeakerException +from server.helper.config import ConfigONNX +from server.workers.workers import worker_onnx_audio + + +async def play_audio(queue: asyncio.Queue, websocket: WebSocket): + while True: + # get the next audio chunk from the queue + audio_chunk = await queue.get() + + # check if this is the end of the stream + if audio_chunk is None: + break + + # send the audio chunk to the client + await websocket.send_bytes(audio_chunk) + # print a message for debugging + # print(f"Sent audio chunk of {len(audio_chunk)} bytes") + # receive any data from the client (this will return None if the connection is closed) + # TODO needs a timeout here in case the audio is not played (or finished?) 
within a given time + data = await websocket.receive() + # check if the connection is closed + if data is None: + break + + +async def generate_audio(sentences, speaker_id, audio_queue): + config = ConfigONNX() + model_tts = config.model_tts + vocoder = config.vocoder + speaking_rate = config.speech_speed + temperature = config.temperature + speaker_config_attributes = config.speakerConfigAttributes.__dict__ + + loop = asyncio.get_event_loop() + with ThreadPoolExecutor() as executor: + for sentence in sentences: + sentence = sentence.strip() # removes leading and trailing whitespaces + if len(sentence) > 0: # checks if sentence is not empty after removing whitespaces + content = await loop.run_in_executor( + executor, + generate, + sentence, + speaker_config_attributes["speaker_ids"], + model_tts, + vocoder, + speaker_config_attributes["new_speaker_ids"], + speaker_config_attributes["use_aliases"], + speaker_id, + speaking_rate, + temperature + ) + await audio_queue.put(content) + + await audio_queue.put(None) # signal that we're done generating audio + + +def generate(sentence, speaker_ids, model_tts, vocoder, new_speaker_ids, use_aliases, speaker_id, + speaking_rate, temperature): + print(f"Processing sentence: {sentence}") + + if speaker_id not in speaker_ids.keys(): + raise SpeakerException(speaker_id=speaker_id) + + print(" > Model input: {}".format(sentence)) + print(" > Speaker Idx: {}".format(speaker_id)) + + if use_aliases: + input_speaker_id = new_speaker_ids[speaker_id] + else: + input_speaker_id = speaker_id + + # Create a temporary file name but do not open it + temp_fd, tempfile_name = tempfile.mkstemp() + os.close(temp_fd) + + p = Process(target=child_process, args=(tempfile_name, sentence, input_speaker_id, model_tts, vocoder, + speaking_rate, temperature)) + p.start() + p.join() + + # Read the data from the temp file + with open(tempfile_name, 'rb') as tempf: + out_data = tempf.read() + + # Remove the temporary file + os.remove(tempfile_name) + + out = io.BytesIO(out_data) + return out + + +def child_process(tempfile_name, sentence, input_speaker_id, model_tts, vocoder, speaking_rate, temperature): + # sentence, speaker_id, model, vocoder_model, use_aliases, new_speaker_ids, temperature, speaking_rate + wavs = worker_onnx_audio(sentence, speaker_id=input_speaker_id, model=model_tts, vocoder_model=vocoder, + temperature=temperature, speaking_rate=speaking_rate) + with open(tempfile_name, 'wb') as tempf: + model.save_wav(wavs, tempf) diff --git a/server/audio_utils/audio_utils_orig.py b/server/audio_utils/audio_utils_orig.py new file mode 100644 index 0000000..d2c32be --- /dev/null +++ b/server/audio_utils/audio_utils_orig.py @@ -0,0 +1,95 @@ +import asyncio +import tempfile +import os +import io +from concurrent.futures import ThreadPoolExecutor +from multiprocessing import Process +from starlette.websockets import WebSocket + +from server.exceptions import SpeakerException +from server.helper.config import ConfigONNX + + +async def play_audio(queue: asyncio.Queue, websocket: WebSocket): + while True: + # get the next audio chunk from the queue + audio_chunk = await queue.get() + + # check if this is the end of the stream + if audio_chunk is None: + break + + # send the audio chunk to the client + await websocket.send_bytes(audio_chunk) + # print a message for debugging + # print(f"Sent audio chunk of {len(audio_chunk)} bytes") + # receive any data from the client (this will return None if the connection is closed) + # TODO needs a timeout here in case the audio is not 
played (or finished?) within a given time + data = await websocket.receive() + # check if the connection is closed + if data is None: + break + + +async def generate_audio(sentences, speaker_id, audio_queue): + config = ConfigONNX() + model = config.synthesizer + speaker_config_attributes = config.speakerConfigAttributes.__dict__ + + loop = asyncio.get_event_loop() + with ThreadPoolExecutor() as executor: + for sentence in sentences: + sentence = sentence.strip() # removes leading and trailing whitespaces + if len(sentence) > 0: # checks if sentence is not empty after removing whitespaces + content = await loop.run_in_executor( + executor, + generate, + sentence, + speaker_config_attributes["speaker_ids"], + model, + speaker_config_attributes["new_speaker_ids"], + speaker_config_attributes["use_aliases"], + speaker_id + ) + await audio_queue.put(content) + + await audio_queue.put(None) # signal that we're done generating audio + + +def generate(sentence, speaker_ids, model, new_speaker_ids, use_aliases, speaker_id): + print(f"Processing sentence: {sentence}") + + if speaker_id not in speaker_ids.keys(): + raise SpeakerException(speaker_id=speaker_id) + + print(" > Model input: {}".format(sentence)) + print(" > Speaker Idx: {}".format(speaker_id)) + + if use_aliases: + input_speaker_id = new_speaker_ids[speaker_id] + else: + input_speaker_id = speaker_id + + # Create a temporary file name but do not open it + temp_fd, tempfile_name = tempfile.mkstemp() + os.close(temp_fd) + + p = Process(target=child_process, args=(tempfile_name, sentence, input_speaker_id, model)) + p.start() + p.join() + + # Read the data from the temp file + with open(tempfile_name, 'rb') as tempf: + out_data = tempf.read() + + # Remove the temporary file + os.remove(tempfile_name) + + out = io.BytesIO(out_data) + return out + + +def child_process(tempfile_name, sentence, input_speaker_id, model): + wavs = model.tts(sentence, speaker_name=input_speaker_id) + with open(tempfile_name, 'wb') as tempf: + model.save_wav(wavs, tempf) diff --git a/server/exception_handler.py b/server/exception_handler.py new file mode 100644 index 0000000..97142a0 --- /dev/null +++ b/server/exception_handler.py @@ -0,0 +1,23 @@ +from starlette.responses import JSONResponse +from fastapi import Request + +from server.exceptions import LanguageException, SpeakerException +from server.helper.config import ConfigONNX + + +async def language_exception_handler(request: Request, exc: LanguageException): + speaker_config_attributes = ConfigONNX().speakerConfigAttributes.__dict__ + + return JSONResponse( + status_code=406, + content={"message": f"{exc.language} is an unknown language id.", "accept": speaker_config_attributes["languages"]}, + ) + + +async def speaker_exception_handler(request: Request, exc: SpeakerException): + speaker_config_attributes = ConfigONNX().speakerConfigAttributes.__dict__ + + return JSONResponse( + status_code=406, + content={"message": f"{exc.speaker_id} is an unknown speaker id.", "accept": list(speaker_config_attributes["speaker_ids"].keys())}, + ) diff --git a/server/exceptions.py b/server/exceptions.py new file mode 100644 index 0000000..c833147 --- /dev/null +++ b/server/exceptions.py @@ -0,0 +1,8 @@ +class SpeakerException(Exception): + def __init__(self, speaker_id: str): + self.speaker_id = speaker_id + + +class LanguageException(Exception): + def __init__(self, language: str): + self.language = language \ No newline at end of file diff --git a/server/helper/__init__.py b/server/helper/__init__.py new file mode 
100644 index 0000000..e69de29 diff --git a/server/helper/__pycache__/__init__.cpython-310.pyc b/server/helper/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97ea18f4d12a574d3ba03b2c047523b022b35a62 GIT binary patch literal 155 zcmd1j<>g`kf-e(8(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!HWenx(7s(xZY zYG#q4en4e%Mq*KJKv8~HYH~@jeo0BOZel^EesOA1S!$7fMruw0kcyAb%*!l^kJl@x Xyv1RYo1apelWGStx0ne?urL4sB7Y>M literal 0 HcmV?d00001 diff --git a/server/helper/__pycache__/config.cpython-310.pyc b/server/helper/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6310b02805d1f06356d1bbe167c5d9387126442a GIT binary patch literal 1605 zcmaJ>OOF&c5VqZq>7JckmR(>`qJ-s=&%{C!_;$nRS zu(%J?`~)D0qyE0D8YCob&-8?L$D;j{v|?-D8};4^YGX;@NIFl(c48W(vL(ltv>UA|8>IytuI#vM`14vnQQFw3 zGm2-O6gEVuY+$#Rs^E=imv-{3&S0fKuDU9J+JO-o*u8^B3GgvQ_n>Wc{(ILdWB1_5 z^ETk8e4~m(y#XQW34q=HXv%YG+!;E=y%9o$5F=oI^d`UuqIcl+_ubjDF4aslD%ZDX zk6vU;q02|QK2lk0W>Br0qRD4Q>1RsMma2frXPGO)w0U78&hsj7IllqE3_1*1NY|gO z;CS~_(WWc=4@@Hi=ZTL#Dp4xV?U-Xex&q}1=TAFP3_JP~*t-8+gI(150N=wj7-B>i zWpwKwr$r8ysufuw%>PjCTlj88IXOC`TOh+zlKPHzk=_EM;eRtE*J_}Q+;wF80R%~a zI0FKQL1qp;hrS%hM&5-zeNVB&fM9I?I2qZCjxb%(=14NExZPN*r@gRRl%f&~*o_NO zEjkd5y-=y=tFN&5b_1P>A{PdBuNvr*=4@Y_aVudO?ggSRB4FhPa9PE4{|3`w;Gb2K z9q%8UPsy{=ewHFk57x+~=ne>yzGOYqla{=rE$itCqyiG<_2kIcyMQ3^K(YmDL)V!j zmnd)k_&CVKO=XhE`bVa&tdG}<_3K7e8tbU9A$*PS4FX;#iMJLeD*hYmKIo2Mx>$V} z+cabmjo6qS-}+LlUrMUJ44#reUxAn2L%53I=(~^7r$jr+WgC;sVH#}Mh>YoE>=>7f z^mWj{CE&VmI&|OEtS(lK{Igd~rt_vX)4V!VT2+~H7p=z3yG9t;d(Oc&x;wPnJqori R{)kSz31w{O8r#|V?q5pxdr$xX literal 0 HcmV?d00001 diff --git a/server/helper/__pycache__/singleton.cpython-310.pyc b/server/helper/__pycache__/singleton.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..380d9881d67830405c5c49c6c3bb615fca440a97 GIT binary patch literal 663 zcmYjPJ!>2>5S@{9>+`aQ!MJd`v=@k%wBbln1kweAlV%$sw6bq6`xQy&aA7+K+y(L< z()cf_b(KC<8fUa;A2%?w8qM(Lz0r2FSqfx-e|@@C2yh>heG`4 zFW=(#4#Oce)XF(2@8I9teAzW7SG}q1tNeVUm&!Khwp$tPUGBYmsro8+#$FqnFHKEj za`&Y2tl!v#W*(5cV+sNjOj(Ky*#pDuAr;?`cRM-T5t~Mmt!j*vv?$qh<+_gNOv None: + + self.speech_speed = speech_speed + self.mp_workers = mp_workers + self.use_cuda = use_cuda + self.use_mp = use_mp + self.config_path = config_path + self.vocoder_config_path = vocoder_config_path + self.show_details = show_details + self.args = args + + self.synthesizer = Synthesizer( + tts_checkpoint=model_path, + tts_config_path=config_path, + tts_speakers_file=speakers_file_path, + tts_languages_file=None, + vocoder_checkpoint=vocoder_path, + vocoder_config=vocoder_config_path, + encoder_checkpoint="", + encoder_config="", + use_cuda=use_cuda + ) + + self.speakerConfigAttributes = SpeakerConfigAttributes(self.synthesizer, speaker_ids_path) +''' + + +class ConfigONNX(metaclass=Singleton): + def __init__(self, model_path, vocoder_path, speaker_ids_path, + speech_speed, temperature, mp_workers, use_cuda, use_mp, unique_model) -> None: + self.speech_speed = speech_speed + self.temperature = temperature + self.mp_workers = mp_workers + self.use_cuda = use_cuda + self.use_mp = use_mp + self.model_path = model_path + self.vocoder_path = vocoder_path + self.unique_model = unique_model + + # self.model_tts, self.vocoder = load_onnx_tts(model_path=model_path, vocoder_path=vocoder_path, use_cuda=False) + + # speakers_id_path es el JSON con los nombres de los speakers + self.speakerConfigAttributes = SpeakerConfigAttributes(speaker_ids_path) + + +''' +class 
SpeakerConfigAttributes: + def __init__(self, synthesizer, speaker_ids_path) -> None: + self.use_multi_speaker = None + self.speaker_ids = None + self.speaker_manager = None + self.languages = None + self.new_speaker_ids = None + self.use_aliases = True + self.use_gst = None + + self.setup_speaker_attributes(synthesizer, speaker_ids_path) + + def setup_speaker_attributes(self, model, speaker_ids_path): + # global new_speaker_ids, use_aliases + + use_multi_speaker = hasattr(model.tts_model, "num_speakers") and ( + model.tts_model.num_speakers > 1 or model.tts_speakers_file is not None) + + speaker_manager = getattr(model.tts_model, "speaker_manager", None) + if speaker_manager: + self.new_speaker_ids = json.load(open(speaker_ids_path)) + + if self.use_aliases: + self.speaker_ids = self.new_speaker_ids + else: + self.speaker_ids = speaker_manager.ids + + self.languages = ['ca-es'] + + # TODO: set this from SpeakerManager + self.use_gst = model.tts_config.get("use_gst", False) + + self.use_multi_speaker = use_multi_speaker + self.speaker_manager = speaker_manager +''' + + +class SpeakerConfigAttributes: + def __init__(self, speaker_ids_path) -> None: + self.use_multi_speaker = None + self.speaker_ids = None + self.speaker_manager = None + self.languages = None + self.new_speaker_ids = None + self.use_aliases = True + + self.setup_speaker_attributes(speaker_ids_path) + + def setup_speaker_attributes(self, speaker_ids_path): + + # model_inputs = model.get_inputs() + # use_multi_speaker = len(model_inputs) == 4 + use_multi_speaker = True + + # use_multi_speaker = hasattr(model.tts_model, "num_speakers") and (speaker_ids_path is not None) + + if use_multi_speaker: + self.new_speaker_ids = json.load(open(speaker_ids_path)) + + if self.use_aliases: + self.speaker_ids = self.new_speaker_ids + + self.languages = ['ca-es'] + + self.use_multi_speaker = use_multi_speaker diff --git a/server/helper/singleton.py b/server/helper/singleton.py new file mode 100644 index 0000000..cac2a0f --- /dev/null +++ b/server/helper/singleton.py @@ -0,0 +1,16 @@ +from typing import Dict + + +class Singleton(type): + _instances: Dict = {} + + def __call__(cls, *args, **kwargs): + if kwargs: + cls._instances = {} + + if cls not in cls._instances: + cls._instances[cls] = super(Singleton, cls).__call__( + *args, **kwargs + ) + + return cls._instances[cls] \ No newline at end of file diff --git a/server/modules/__init__.py b/server/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/modules/__pycache__/__init__.cpython-310.pyc b/server/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..240a23498d5017e9b26e0b38b74b3c63967059fe GIT binary patch literal 156 zcmd1j<>g`kf-e(8(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!H$enx(7s(xZY zYG#q4en4e%Mq*KJKv8~HYH~@jeo0BOZel^EesOA1S!$7fZhlH>PHM4!e0*kJW=VX! 
VUP0w84jYK59mwEfCLqDW002(;B_99) literal 0 HcmV?d00001 diff --git a/server/modules/__pycache__/tts_request_model.cpython-310.pyc b/server/modules/__pycache__/tts_request_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da612ebecaa7e988b552ea627627e1248187a8ee GIT binary patch literal 752 zcmZ8e!D<^Z5S6sM_IhpCDTIbzO0GU6PN6*%ObI=ty)=X-fwC7NR%3gER@zAG5C?in zulNDW$IYgO2Z}23gIQH-u0SuF6<)w?)w)aA^9y#YJx6t!V)yB-FWpbqPeO^_~_dx0(^yL1QlE@cs; zjG^I~N`qa9>#n+@5HuCZ z-$gPBjJ+RX#_xSG=`8YD9=bjdtoppiI^oNGyI+IxZPzTOu>Ie){JxW@4yr_9{*0_U zA0Bl~TJPkFj>|+W%1B<>+BscZ+o3=8pJ8>`QmS0m`6?`BA@tYW)hB0v^K@n|yD4>> zf$}JDQDSTCvE=^#!9jRvE9LH=<(N5Yto@lUFZQGNZ@lRcOc-81{J`F5($4Mne*eR^ zw0^{C;?skimz?*PSKO|@-fj=Ra$(zabudQu@t)V$#hpH#;hYm99^<^QxM8lSv%`@f zZmE{97ctH7LVq#MIz(W4s9n|j+Pv}l*Xo{NriIN8r*g;P@k6^dzSn#Qrc=xH5novz z?@u%QVn3xL&Rgu@Ntd5duj*oZE1&bpFKKP z>(nR0%K^6-)LXZx#K@{bOqOmPJPx$jEZi>pG=wEFIH{&|QATKaVB2ey`}3^Val4l2 z6YJe+O7pgSnHif5Qz6+XZ~8xcn+GH&#!nv{;TvMao-Uitw0X2NX1}<5{DW$hj?#xx zeXA?<^w+%9nfFs4eZddcGmGY=@%Jc8v~Cdw+mqpYF+|@`et)s{m~i*A>{XGpH@;n_ zw=EkNUw5l+Qa0TEUHMa6>RH(H>5M<-sLNhk-L|HTm?AA{rv43q7C9^_87IE>TF9es zL%uv$J2Hz?nQR{@kb8Q3xW1Y4XFxXAN}J^VjS?MB{GY6xy;z|)D{%<_km+h%|Dms| z$^D1+B?s5?1EsFEXcBdf*Xg?IoNf1LT%NyvWa3iMc1S@eluPqx(%GddzC^md;{mGe6tdY{xAEXNkEETdWDp4aw z%*9`g_pD^RmLI3`jeV|~cl+8^2XVV3!rqNgiA}+P4uSo~Ai>(Rj^9fW_8-{&{w5Al z7jLR8on5+?!Ass8R5U7uf_DkjY8o^pcmoL>f zIr|5_8d*0KN7dz|56iza8nhAeCKjgkx4zBg@kW4jNO9}tFTql>D{lqvMJ3@oWZ90~ zqo!r_&7g|Bd$YD+b&jXHg2jfB+x2XRD5-b7tIQ#?^r~1DiEi5kTmD#-*3Irsd-Vic zzdMW(x@FUggT|Qkuglk@Gv3}-H88b`&C=tviuxJ$lRoOn1AH5gUWeNcF{GQn^rKo_ z=e4T7@aD_YV&}bM(n8p|-(AI`Gz!%=xL)6X|Jf?CKZy&AFKMLN@)hML64r|?q`kL4 zDpOzhCiZ$*CV1glwg6j77x}}JO0pxZ&dY~Y?wZ*2`W#&^qa*g5&ulGe#3GsN3Pe;t z#T8*(N*lyNnq8HCUu#0!lxl-K@WlD_uplY=)=XRdGp8ZoxOn#dR{D z`A4u+3$&CkI=E3q6OJ$HdrxTo+)WQ8dg%D(>Vi}sjZKT!#5>8HhEv9quFCtDVm9%^ zcv$Ld_Sc2K2!0)ojxL%Aqb}Ys_VK;Utb{s!W9T0A`>Xs;c~7)K6G7=!ZmVC!BX?t{ zr|5HIs=obs?|g;!Qa^EA`YV>Eq}G)2!gkR?<)&*tFzS3ek6(q1v|zDP-)<|56YhOw zYxp&e=JzVKOzs5^KQjGd$I;j&Da=Y5M);8N)s5ETs|V(zJUe(dOYUZBK4A*3)hX2vOo${C zd2#Da<4Y$b!P}C7T~%xD0mdl09y&a_TzS#o0fktcS>-6l_4oi2#MO&l(w8h6E1oEQ z#>tXQ%J}+xnpF2DbFtec_wS0fZrxaHzm;7@uD`wKRawZI$j*kr-+%u-bzBBEPuxoX z)6{4Qj9<-!_n%o%(~FmeOU$Hik7k7Q+wOlP;gkr)^>poTLvGDB-*11^6-@o%QTUFs0an*)ot`aLU&fqQ!S`ZQ*MG?@o ze11y$+@|2m|x1&t|_xr<7qD^Kl(})rj>yB1PU#{@~C`bUT)?O!%f#?ZOPH zZW4Wt1mT&j4NK?ICo*?_+O+Nfs{Djzk6*q|BCD%oSkpdY@V3VAcDH=2_lw(@F_EMS zInf@v;n%1l?ZO)nmPT9S6im=_lxd z%2A<`W2zdZl2P&q^M@`?>muH_4p-eMlPU;FJmq{J)BjCoj&$9{V`mg`cCqk0Dy@X>QkaXk9MK zkv)^fC||ON_aX|uk9xl5G0w-q4z`S^U8j5Wi&&Y4EU`=QA^9&qiS;Y| zCZ4kE{jxdkC>fRHm#FUKyW3Z+p8F0P9Fwqxv})duv~Hff>G`O+@I^dvpt@Vp*S*Lp z1ncF=*#SpJx}-Rqloa#rOTQ8oxvP#5J! 
bY+V%m))ZOlc>m^C)RMzCXWI&E-?RS@!U7OX literal 0 HcmV?d00001 diff --git a/server/templates/details.html b/server/templates/details.html new file mode 100644 index 0000000..51c9ed8 --- /dev/null +++ b/server/templates/details.html @@ -0,0 +1,131 @@ + + + + + + + + + + + TTS engine + + + + + + + + + + Fork me on GitHub + + {% if show_details == true %} + +
+ Model details +
+ +
+
+ CLI arguments: + + + + + + + {% for key, value in args.items() %} + + + + + + + {% endfor %} +
CLI key Value
{{ key }}{{ value }}
+
+

+ +
+ + {% if model_config != None %} + +
+ Model config: + + + + + + + + + {% for key, value in model_config.items() %} + + + + + + + {% endfor %} + +
Key Value
{{ key }}{{ value }}
+
+ + {% endif %} + +

+ + + +
+ {% if vocoder_config != None %} +
+ Vocoder model config: + + + + + + + + + {% for key, value in vocoder_config.items() %} + + + + + + + {% endfor %} + + +
Key Value
{{ key }}{{ value }}
+
+ {% endif %} +

+ + {% else %} +
+
+        Please start the server with --show_details=true to see details.
+    
+ + {% endif %} + + + + \ No newline at end of file diff --git a/server/templates/index.html b/server/templates/index.html new file mode 100644 index 0000000..67f46b4 --- /dev/null +++ b/server/templates/index.html @@ -0,0 +1,152 @@ + + + + + + + + + + + TTS engine + + + + + + + + + + + + + + +
+
+
+
    +
+ + {%if use_gst%} + + {%endif%} + + +

+ + {%if use_multi_speaker%} + Trieu un locutor: +

+ {%endif%} + + {%if show_details%} +

+ {%endif%} + +

+
+
+
+ + + + + + + diff --git a/server/templates/websocket_demo.html b/server/templates/websocket_demo.html new file mode 100644 index 0000000..1446751 --- /dev/null +++ b/server/templates/websocket_demo.html @@ -0,0 +1,147 @@ + + + + + + TTS Engine - Websockets + + + + + + + + +
+
+
+
    +
+ +

WebSocket Audio Streaming

+ + {%if use_gst%} + + {%endif%} + + + + +

+ + {%if use_multi_speaker%} + Trieu un locutor: +

+ {%endif%} + +
+
+
+ + + + + + + \ No newline at end of file diff --git a/server/tests/__init__.py b/server/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/tests/api/__init__.py b/server/tests/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/tests/api/views/__init__.py b/server/tests/api/views/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/tests/api/views/test_api.py b/server/tests/api/views/test_api.py new file mode 100644 index 0000000..e1ad3d5 --- /dev/null +++ b/server/tests/api/views/test_api.py @@ -0,0 +1,81 @@ +import pytest +import json +from fastapi import status +from fastapi.testclient import TestClient +from server.tests.base_test_case import APIBaseTestCase + + +class TestApi(APIBaseTestCase): + @pytest.fixture(autouse=True) + def setup_before_each_test(self): + self.setup() + + def test_text_to_voice(self): + options = { + "voice": "f_cen_095", + "type": "text", + "text": "hola" + } + with TestClient(self.app) as client: + response = client.post(url="/api/tts", content=json.dumps(options)) + + assert response.status_code == status.HTTP_200_OK + assert response.headers["content-type"] == "audio/wav" + assert response.content is not None + + def test_text_to_voice_error(self): + msg = { + "message":"sfsfs is an unknown speaker id.", + "accept":["f_cen_095","f_cen_092","m_occ_072","m_cen_pau","m_occ_7b7","m_val_51c","f_cen_063","f_cen_051"] + } + options = { + "voice": "sfsfs", + "type": "text", + "text": "hola" + } + with TestClient(self.app) as client: + response = client.post(url="/api/tts", content=json.dumps(options)) + + assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE + content = json.loads(response.content) + assert content["message"] == msg["message"] + assert content["accept"] == msg["accept"] + + + def test_list_voices(self): + + with TestClient(self.app) as client: + response = client.get(url="/api/available-voices") + + assert response.status_code == status.HTTP_200_OK + voices = json.loads(response.content)["voices"] + assert voices == ["f_cen_095","f_cen_092","m_occ_072","m_cen_pau","m_occ_7b7","m_val_51c","f_cen_063","f_cen_051"] + + + @pytest.mark.repeat(25) + def test_multi_larger_text(self): + options = { + "voice": "f_cen_095", + "type": "text", + "text": "" + } + for sentence in texts: + options["text"] = sentence + + with TestClient(self.app) as client: + response = client.post(url="/api/tts", content=json.dumps(options)) + + assert response.status_code == status.HTTP_200_OK + assert response.headers["content-type"] == "audio/wav" + assert response.content is not None + + +texts = [ + "Els vents del nord bufen amb ferocitat, sacsejant els arbres i fent xiular les finestres dels casals. Els habitants de Sant Miquel de la Vall s'abriguen amb abrics gruixuts i es refugien al caliu dels seus llars, esperant que la tempesta passi. Les muntanyes que envolten el poble es converteixen en una estampa impressionant, amb els cims nevats brillant sota la llum de la lluna. Malgrat el temps advers, la vida al poble continua amb la seva rutina habitual. Els pagesos treballen les terres, preparant-les per a la nova temporada de cultius. Els artesans confeccionen objectes amb habilitat i destresa, mantenint viva la tradició ancestral del poble. Els nens corretegen pels carrers, jugant amb la neu i gaudint de la seva infantesa sense preocupacions. A les tardes, els veïns es reuneixen a la taverna del poble per compartir una copa de vi i conversar sobre els esdeveniments del dia. 
Les rialles ressonen en les parets de pedra, creant una atmosfera de camaraderia i amistat. Amb l'arribada de la primavera, el paisatge es transforma en un mar de colors, amb les flors brotant de cada racó del poble. Els ocells tornen a cantar amb alegria, omplint l'aire amb el seu càntic melodios. Els habitants celebren l'arribada de la nova estació amb festes i celebracions, agraint la renovació de la vida i l'esperança que porta amb ella. Sant Miquel de la Vall continua sent un refugi per a tots aquells que busquen pau i tranquil·litat en un món en constant canvi." + ,"Les campanes de l'església del poble repiquen amb alegria, cridant a tots els habitants a la missa del diumenge. Els fidels s'apressen a arribar, vestits amb les seves millors robes i amb somriures al rostre. El sacerdot pronuncia les paraules sagrades, recordant als fidels els valors de la fe i la bondat. Les veus del cor ressonen en l'espai sagrat, omplint-lo de música celestial. Després de la missa, els habitants es reuneixen a la plaça del poble per compartir un àpat frugal i conversar animadament sobre els esdeveniments de la setmana. Hi ha una atmosfera de calma i serenitat que impregna l'aire. Els nens juguen alegrement pels carrers, rient i cridant amb innocència. Els adults observen amb tendresa les seves cabrioles, recordant els dies en què ells mateixos eren joves i despreocupats. Amb el capvespre, el cel es tenyeix de colors pastel, creant un espectacle impressionant que captiva la mirada dels espectadors. Les orenetes volen en cercles al voltant del campanar, anunciant l'arribada de la nit. A mesura que la foscor envaeix el paisatge, les estrelles comencen a brillar amb intensitat en el firmament. Els habitants del poble es retiren a les seves llars, amb la sensació de pau i benestar que només es troba en el cor d'un lloc com Sant Miquel de la Vall." + ,"la riquesa del territori, amb plats elaborats amb productes frescos i tradicionals que captiven els sentits. Els vespres d'estiu són màgics a Sant Miquel de la Vall, amb la gent reunita a les terrasses dels bars, gaudint de la frescor de la nit i de la companyia dels amics. Les converses flueixen lliurement, embolicades pel perfum de les flors i el so dels grills que canten a l'horabaixa. És en aquests moments que es crea el veritable esperit del poble, una barreja d'hospitalitat i calidesa que acull a tots els que s'hi acosten. Al matí, els primers raigs de sol il·luminen el paisatge, revelant la seva bellesa oculta i donant pas a un nou dia ple de possibilitats. Els habitants es desperten amb energia i entusiasme, preparats per viure les aventures que el futur els té reservades. Amb cada sortida i posta de sol, Sant Miquel de la Vall continua escrivint la seva història, una història que parla de superació, solidaritat i amor per la terra. És un lloc que captiva el cor i l'ànima, un lloc que mai s'oblida i al qual sempre es torna amb alegria." + ,"Els matins a Sant Miquel de la Vall comencen amb el cant dels gallins i el soroll dels primers vehicles que es dirigeixen cap als camps. El sol es filtra entre les branques dels arbres, il·luminant suament els carrers tranquils del poble. Les olors de pa fresc i cafè recent torrat es barregen en l'aire, convidant els habitants a començar el dia amb energia i optimisme. Les botigues del poble obren les seves portes, i els comerciants preparen les seves parades amb productes frescos i artesanals. 
El mercat local es converteix en el centre de l'activitat, amb clients i venedors intercanviant salutacions i notícies mentre negocien les seves compres. Els nens van a l'escola amb les seves motxilles plenes d'il·lusions i llibres, preparats per aprendre i créixer amb cada nova experiència. Mentre tant, els treballadors de les oficines es preparen per a un dia de feina, revisant correus electrònics i fent trucades importants. El ritme tranquil i ordenat de la vida al poble es manté, amb cada persona fent la seva part per contribuir al benestar col·lectiu. A la nit, les llums dels llars brillen a través de les finestres, creant una estampa reconfortant de calidesa i seguretat enmig de la foscor. És un recordatori constant dels llaços forts que uneixen als habitants de Sant Miquel de la Vall, i de la seva capacitat per superar els desafiaments junts." + ,"Les tardes a Sant Miquel de la Vall són tranquil·les i apacibles, amb els habitants del poble gaudint dels seus moments de lleure i descans. Alguns es passegen pels carrers empedrats, admirant l'arquitectura antiga i els detalls artesanals de les cases. Altres es reuneixen als parcs i jardins del poble, on es pot escoltar el xiuxiueig del vent entre els arbres i el cant dels ocells. Alguns habitants prefereixen quedar-se a casa i llegir un llibre o escoltar música, mentre que altres es dediquen a les seves aficions com la jardineria, la pintura o la cuina. Sigui quin sigui el seu passatemps preferit, tots els habitants de Sant Miquel de la Vall gaudeixen dels moments de pau i tranquil·litat que ofereix el poble. A mesura que el sol es pon i la nit cau sobre el paisatge, els llums del poble comencen a brillar, creant una atmosfera màgica i acollidora. És un moment perfecte per reunir-se amb amics i familiars i compartir una bona conversa i un got de vi. Malgrat les preocupacions i els reptes de la vida quotidiana, els habitants de Sant Miquel de la Vall troben sempre temps per apreciar les petites coses i gaudir de la companyia dels seus ser estimats." + ,"Els colors de la tardor pinten el paisatge amb una paleta de tons càlids i vibrants. Les fulles dels arbres canvien del verd al groc, taronja i vermell, creant un mosaic de colors que captiva la mirada dels espectadors. Els vents suaus porten consigo l'olor de terra humida i la promesa de nous començaments. Les muntanyes que envolten el poble es vesteixen amb una capa de fulles caigudes, formant una catifa natural que crida a ser explorada. Els habitants del poble es preparen per a la temporada de recol·lecció, omplint les cistelles amb fruites i verdures de l'hort. Les festes de la tardor omplen el calendari, amb celebracions que honoren les tradicions i costums ancestrals. Les fogueres crepitants il·luminen les nits fresques, mentre els habitants del poble es reuneixen al seu voltant per compartir històries i rialles. És un temps de gratitud i reflexió, quan els habitants del poble donen gràcies per les seves benvingudes i esperen amb il·lusió els temps que estan per venir." + ,"Les cases de pedra del poble s'alineen al llarg dels carrers empedrats, formant un laberint d'històries i records. Les parets porten marques del temps, amb pedres gastades i finestres adornades amb geranis de colors vius. Els balcons estan engalanats amb banderoles i catifes durant les festes locals, creant una imatge festiva que il·lumina el carrer. Al capvespre, les llums suaus dels fanals s'encenen, projectant ombres misterioses sobre les parets de pedra. 
Els veïns es reuneixen als balcons i les places del poble per gaudir de l'ambient tranquil i contemplar la bellesa del seu entorn. És en aquests moments tranquils que es poden sentir les veus del passat, recordant als habitants del poble la importància de la seva història i la seva cultura." +] \ No newline at end of file diff --git a/server/tests/base_test_case.py b/server/tests/base_test_case.py new file mode 100644 index 0000000..7522f29 --- /dev/null +++ b/server/tests/base_test_case.py @@ -0,0 +1,27 @@ +from fastapi.testclient import TestClient +from main import app +from server.helper.config import Config + +class APIBaseTestCase: + + def setup(self): + self.app = app + self.client = TestClient(self.app) + + +class configBaseTestCase: + def setup(self): + config = Config( + model_path="models/vits_ca/best_model.pth", + config_path="models/vits_ca/config.json", + speakers_file_path=None, + vocoder_path=None, + vocoder_config_path=None, + speaker_ids_path="models/vits_ca/speaker_ids.json", + speech_speed=1.0, + mp_workers=1, + use_cuda=False, + use_mp=False, + show_details=True, + args={} + ) \ No newline at end of file diff --git a/server/tests/test_config.py b/server/tests/test_config.py new file mode 100644 index 0000000..f30917e --- /dev/null +++ b/server/tests/test_config.py @@ -0,0 +1,14 @@ +from server.helper.config import Config +from server.tests.base_test_case import configBaseTestCase +import pytest + + +class TestConfig(configBaseTestCase): + @pytest.fixture(autouse=True) + def setup_before_each_test(self): + self.setup() + + def test_model_voices(self): + speaker_ids = ['f_cen_095', 'f_cen_092', 'm_occ_072', 'm_cen_pau', 'm_occ_7b7', 'm_val_51c', 'f_cen_063', 'f_cen_051'] + speaker_config_attributes = Config().speakerConfigAttributes.__dict__ + assert speaker_ids == list(speaker_config_attributes["speaker_ids"].keys()) diff --git a/server/tests/test_worker.py b/server/tests/test_worker.py new file mode 100644 index 0000000..ff284dc --- /dev/null +++ b/server/tests/test_worker.py @@ -0,0 +1,22 @@ +from server.helper.config import Config +from server.tests.base_test_case import configBaseTestCase +from server.workers.workers import worker +import pytest + + +class TestWorker(configBaseTestCase): + @pytest.fixture(autouse=True) + def setup_before_each_test(self): + self.setup() + + def test_worker(self): + config = Config() + speaker_config_attributes = config.speakerConfigAttributes.__dict__ + wavs = worker("Es una prova", + speaker_id="f_cen_095", + model=config.synthesizer, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"]) + + + assert len(wavs) > 1 \ No newline at end of file diff --git a/server/utils/__init__.py b/server/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/utils/__pycache__/__init__.cpython-310.pyc b/server/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebca37b9872b1861f21d28a02910d0fc9b1d95b9 GIT binary patch literal 154 zcmd1j<>g`kf-e(8(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!Hmenx(7s(xZY zYG#q4en4e%Mq*KJKv8~HYH~@jeo0BOZel^EesOA1S!$7fX-Q^Iv3`7fW?p7Ve7s&k Xm}h5VU3MCO zfPc^)`AfcX+MN?8o>MASh$p=lKl}5S?>)b&-`^!@zrKH*JY|IZ^vSk`n4F=T=NKfB zv>@sFOH=lqNG5}CLIP-Da0!+@(%@GDgQYuNxWY}`hG-&U3 z+}=oks$izTyv=N0&Bs{>;T!YoF8Vuk^BIFBO>jpoU9uIKg>cx=B{D6;CS32W*oxA> zD9L6Np4pDSK}m}YJaR=_Ow_Q3OuKj*P^{5cGG06xiLZHCmxC)&)M_xPU~qGCJIKx8 z=JfWSqJqWaQ9l1a+DiM 
zc@EW7W!5CtnioRn2~u7uNNSrGCJ}Ja{^eL-yO{G#6b0wL*QSSIpB~X3i|IZ+V2elp zywC9lKBVJe=x=+SoV#7l%SzUTU-vlwQj21J0*A=iO5F+veh(**_$e`d>|*kOwL92} vcq7p5m33ir4OOO$!J@&dbkTZNTKE>JZwGj07r$C-dE;NqL>OWcV_AOyvO327 literal 0 HcmV?d00001 diff --git a/server/utils/__pycache__/utils.cpython-310.pyc b/server/utils/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe5a23e4c59d42b432dd51065d9574909288c169 GIT binary patch literal 1123 zcmZux&2H2%5VjL1P19CXg%(vHWFaAKMCsD&szUtStBM5yi%?`@?{2o)B(j~Yw#uH` zE8-QpM_z$P$(2)I0WORkTJ^w0Gqxx5^Lb{HVq+sjKtJanhie3(U*>R|d^o%WRT%;ZQl>zT+$dK{~SWkMR0E2tKNYxrr-MXn>iAw?Q?I}_#x z80`Ro{@dH+!0KYQ>cd6 z;qX&sM mp.cpu_count(): + parser.error("Maximum value for {0} is {1}".format(option_string, mp.cpu_count())) + #raise argparse.ArgumentError("Minimum bandwidth is 12") + if int(values) <= 1: + print(values) + parser.error("Minimum value for {0} is 2".format(option_string)) + + setattr(namespace, self.dest, values) diff --git a/server/utils/utils.py b/server/utils/utils.py new file mode 100644 index 0000000..806bc52 --- /dev/null +++ b/server/utils/utils.py @@ -0,0 +1,32 @@ +import json +import os +from typing import Union + + +def update_config(config_path, velocity): + length_scale = 1 / velocity + with open(config_path, "r+") as json_file: + data = json.load(json_file) + data["model_args"]["length_scale"] = length_scale + json_file.seek(0) + json.dump(data, json_file, indent=4) + json_file.truncate() + + +def style_wav_uri_to_dict(style_wav: str) -> Union[str, dict]: + """Transform an uri style_wav, in either a string (path to wav file to be use for style transfer) + or a dict (gst tokens/values to be use for styling) + + Args: + style_wav (str): uri + + Returns: + Union[str, dict]: path to file (str) or gst style (dict) + """ + if style_wav: + if os.path.isfile(style_wav) and style_wav.endswith(".wav"): + return style_wav # style_wav is a .wav file located on the server + + style_wav = json.loads(style_wav) + return style_wav # style_wav is a gst dictionary with {token1_id : token1_weigth, ...} + return None diff --git a/server/views/__init__.py b/server/views/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/server/views/__pycache__/__init__.cpython-310.pyc b/server/views/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d573f4ab641cd50c1f7981b967973d02fbf125f GIT binary patch literal 154 zcmd1j<>g`kf-e(8(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!Hmenx(7s(xZY zYG#q4en4e%Mq*KJKv8~HYH~@jeo0BOZel^EesOA1S!$7fS!QZ^v3`7fW?p7Ve7s&k Xg`kf-e(8(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!Hcenx(7s(xZY zYG#q4en4e%Mq*KJKv8~HYH~@jeo0BOZel^EesOA1S!$7fS!QZ^F;H!$etdjpUS>&r ayk0@&Ee@O9{FKt1R6CH-#Y{kgg#iGaF(tnM literal 0 HcmV?d00001 diff --git a/server/views/api/__pycache__/api.cpython-310.pyc b/server/views/api/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..073ac940a55294b21af3e59505c023d3f9b9df17 GIT binary patch literal 6186 zcma)ATXP%9b)Fs!fWd_zc#%l1ZX>Ox|Bqg0taLYf0zJ!E(6LTG9DR@J4v9bx!B2!TIn)>w?bD1Q)|g ztxGyz3!LzB>vDLdbw!tFgEzygt*ha+)-_$O2XBSfTi11dE_gd^v>M@!)(u^r58esi zZM_?=wpN)TN_>GY9+c)(4`w^C4dn z=XC9N#Rq(OWb&0`#+UgU$2s(OVOn!8t7-k&mf?L5?QZk)qRuZIoBX1f>zMo!cb-~@ zto41-9Ny-aM~q+LZ$34^xud18z9fB(zon&rP%^aqb;!TX8(RJ@zro*mYP9|wH9zF< zqUNHwgb`MGQ@6PX{zrTb{F`D1{9F7z&HtFQ4Wse?@5x%$8!Ro|dvO0@+*3lLQ2lJ< z%k_sM>BdnasPZeZxe>P?3)L{wxeXdQ$mb zfNZ|K?fDUPw;{Gd5oKdp4?MLEW@STp%5VSJ4@83E>}P(o~I_<&Wd2 
z<8OVrzWy^bs_exQ0&W~dPtv(lg=x2T;_V7|&)ZEGgV^I*(p4&Pdy$W2rv~~gbfxI{ zPf}AvFIZ{^#oA?pi0 zn0TY0H7au$Nv;vON#quh_du|@g-l0UEg5+i6)@%L0%&E44N@>ow#W+K$`4WK&#vu> z%|su|RW8E#|Fi~q72>qt3)2QJH8|OLc?}Y>L4+*~n+`4QZo&v;I&+*(fyS!ZcwVoN?xp&cz7Cd@ z@m)RPoX?-=W@&NEe|6f6e&j26(r9dTKf?$KO{{8GnfwTuzO6R{N9QF&sF4glvcTj9 zoP;)brr;RJB|zUMGX`uYw_`mg$Q_208R- zuNa^u(Be_yD9_8s<%7IhP>aJegZzjcGL+5^mqx}xK`jsRgTf)^9{sag8NM+vN9?(& z&hg42)9X3^9BUk2fNT-ms^%7l3w&ma9kE4Y#Jr2(*NCsqsY~kOA^K;=Gxp3xn4IPH zr?zs`r6ZI0pTIT`Y{<`L@&jAT=AK#4^3Mt#i_i0gr}jY!TD^S42vq#T@*@P`AMJx zpb1B93rC+z$BVe5Fq)%cXHz)IeuTQj?+e~^?klG&WG9wkLM$aeU=$dhN%KNAv#b-d~PII)El83w-_gp3dfR@-&-x|I<=p?ofpPfJt{3>c zd7`z~VA}uE#{@x94^ga~q}T1nQo$mork7e!+v7(Fsd;4DLVgb}52f-+07*-eeiC^O zwP}SSHm2iYQR!?X_9(v7WL-p?MoIn%Lh>$2Rx@WN`wM? zS|RvGu(T&dI-@(pX1Ih?`6HeTMN78dRgNyEx;3R#)PIRr4t zOtKL)e9>TVaw(yXe+S`Y9>iw0Wn1N3ffbm=3jb~8`^zt>eSCSLMmPqgv10^2d-d8t zfLN=W0FJf}`vA5<3O`~U>y%{x;?RQS7a*_;IvE+5oz7MM6uf`I_YIlCvhmtr1X?!m zC8q})u%R(BhHPY<9?S$LfBRoC!zoTCM0;FHku6+%f zCNB^Q?t8A!jowZ4*+R#?10j|KB}@sHC9)Tgm}^m6g(yEg&TL=38NAUxg|K=iW9vl z4Lj40zV1Aru{0vr{`BWm)Oylt(-xkC<^C1;Ne85`STk*_U=~>2EVCM`GRotxX%##Z zC7Z1NziI^=^<~S}9Glg_ud?%b+qBudSzEZm&VO4g)Id$ERs|P$**42&)s*`fsz0-) ztzGnlV>0j|BbOQedD40z4KEVH{8m$dYU`^J)S%SkF<|Z|8Th=f? zFoy*WggRot#gxw>hs(pTsO&)w=$9Q5C$BlB0s20KG*B)ZmI>#M%*i-k*DOO-_BR-C zEi?KRRUOU{s{0C90l7<%M`|0PIZhvSVyVYVEA*&?P)mcSOuTaDah=kz zEqk8%4|I??Am*=OLb%nLFds6a`CL6WpEJLy&7NiUug!1e-$AJHNje+Cp#2lfC_V7y zfXE?8YQ~8qlT7nbFYNA1LHT0G!Vko`vEdkz{>o$wyqs%5F3BqCl}&92{-&%@X<^bg zhR2f{0k}lc9Wb4r)QlgP088WyRRA~8OG^`xt_SHRP9x5?2D2IBk#vttFOJ2see5Ye zNSdeer%YpREQ#IEp5BtoWs(O?7sr*`A^=WmjvqiM0;OliRpV=JbDZkXs+}n$+S2a= zUQA=Seu2}TG`o1`rGCgH8;0YeD|ZoZZ2D!b=xrvNgO@DQi|gX44nQnYsZD%5Z%GRO zJw)@b(Ox=4bjbS^<>)C!zrN`A5B*$_xm`F&*dck8bauMeZZ;7zwNqBBW0rP$_MJ@M zcPStze(NDivD#?v%IFcy5&&$5Gi7Gmb^%ZF Izp_#PKZ44Ad;kCd literal 0 HcmV?d00001 diff --git a/server/views/api/api.py b/server/views/api/api.py new file mode 100644 index 0000000..aa23019 --- /dev/null +++ b/server/views/api/api.py @@ -0,0 +1,246 @@ +import multiprocessing as mp +import asyncio +import traceback +import os +import io + +import numpy as np +from fastapi import APIRouter +from starlette.responses import JSONResponse +from starlette.websockets import WebSocket +from fastapi.responses import StreamingResponse, HTMLResponse +from fastapi import Request +from functools import partial +from itertools import chain +from pysbd import Segmenter +from pathlib import Path +from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates +# from TTS.config import load_config + +from server.modules.tts_request_model import TTSRequestModel +from server.audio_utils.audio_utils import generate_audio, play_audio +from server.exceptions import LanguageException, SpeakerException +from server.helper.config import ConfigONNX +from server.workers.workers import worker_onnx, worker_onnx_audio +from scripts.inference_onnx import save_wav, load_onnx_tts_unique + + +route = APIRouter(prefix='') +# Initialize sentence segmenter +segmenter = Segmenter(language="en") # NEED TO BE CHANGED? 
THERE IS NO CATALAN BUT SPANISH +path_dir = os.path.dirname(os.path.abspath(Path(__file__).parent.parent)) +route.mount("/static", StaticFiles(directory=os.path.join(path_dir, "static")), name="static") +templates = Jinja2Templates(directory=os.path.join(path_dir, "templates")) + +sessions = [] + + +@route.get("/", response_class=HTMLResponse) +def index(request: Request): + speaker_config_attributes = ConfigONNX().speakerConfigAttributes.__dict__ + return templates.TemplateResponse("index.html", {"request": request, **speaker_config_attributes}) + + +@route.get("/startup-parameters") +def parameters(): + config = ConfigONNX() + return JSONResponse( + content={"speech_speed": config.speech_speed, "mp_workers": config.mp_workers, "use_cuda": config.use_cuda, "use_mp": config.use_mp}, + ) + + +@route.get("/websocket-demo", response_class=HTMLResponse) +def websocket_demo(request: Request): + speaker_config_attributes = ConfigONNX().speakerConfigAttributes.__dict__ + return templates.TemplateResponse("websocket_demo.html",{"request": request, **speaker_config_attributes}) + +''' +@route.get("/details", response_class=HTMLResponse) +def details(request: Request): # ¿PARA QUÉ LO NECESITAMOS? + config = ConfigONNX() + model_config = load_config(config.config_path) + if config.vocoder_config_path is not None and os.path.isfile(config.vocoder_config_path): + vocoder_config = load_config(config.vocoder_config_path) + else: + vocoder_config = None + + return templates.TemplateResponse( + "details.html", + {"request": request, + "show_details": config.args.show_details, + "model_config": model_config, + "vocoder_config": vocoder_config, + "args": config.args.__dict__} + ) +''' + + +@route.get("/api/available-voices") +def available_voices(): + speaker_config_attributes = ConfigONNX().speakerConfigAttributes.__dict__ + + return JSONResponse( + content={"voices": list(speaker_config_attributes["speaker_ids"].keys())}, + ) + + +def init_session_workers(model_path, session_list, idx, use_cuda): + global sessions + session = load_onnx_tts_unique(model_path, use_cuda=use_cuda) + sessions[idx] = session + + +@route.post("/api/tts") +def tts(request: TTSRequestModel): + """ + Text-to-Speech API endpoint. + + This endpoint receives a TTSRequestModel object containing the voice and text to be synthesized. It performs the + necessary processing to generate the corresponding speech audio and streams it back as a WAV audio file. + + Parameters: + - request: TTSRequestModel - An object containing the voice and text data for synthesis. + + Returns: + - StreamingResponse: A streaming response object that contains the synthesized speech audio as a WAV file. + + Raises: + - SpeakerException: If the specified speaker ID is invalid. + - LanguageException: If the specified language is not supported. 
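+
+    Example (editorial sketch, not part of the original patch): a minimal
+    client call mirroring the test suite. The host and port are assumptions
+    that depend on how the service is deployed, and the optional "language"
+    field is omitted because the request model appears to default it:
+
+        import requests
+
+        payload = {"voice": "f_cen_095", "type": "text", "text": "hola"}
+        resp = requests.post("http://localhost:8000/api/tts", json=payload)
+        assert resp.status_code == 200
+        assert resp.headers["content-type"] == "audio/wav"
+        with open("out.wav", "wb") as f:
+            f.write(resp.content)  # WAV bytes streamed back by the endpoint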
+ + """ + + config = ConfigONNX() + + mp.set_start_method('fork', force=True) # spawn + + global sessions + + speaker_config_attributes = config.speakerConfigAttributes.__dict__ + + speaker_id = request.voice + text = request.text + + if speaker_id not in speaker_config_attributes["speaker_ids"].keys(): + raise SpeakerException(speaker_id=speaker_id) + if request.language not in speaker_config_attributes["languages"]: + raise LanguageException(language=request.language) + + model_path = config.model_path + vocoder_path = config.vocoder_path + use_cuda = config.use_cuda + + speech_rate = config.speech_speed + temperature = config.temperature + unique_model = config.unique_model + + if config.use_cuda or not config.use_mp: + wavs = worker_onnx_audio(text, speaker_id=speaker_id, model_path=model_path, unique_model=unique_model, + vocoder_path=vocoder_path, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, + temperature=temperature, speaking_rate=speech_rate) + + wavs = list(np.squeeze(wavs)) + out = io.BytesIO() + save_wav(wavs, out) + + else: + + sentences = segmenter.segment(text) # list with pieces of long text input + print("sentences are segmented well...") + mp_workers = config.mp_workers # number of cpu's available for multiprocessing + manager = mp.Manager() # manager to deal with processes and cpu's available in the multiprocessing + print("manager initialized correctly...") + sessions = manager.list([None] * mp_workers) # create a list of ID's of sessions + print("list of sessions correctly set...") + print(len(sessions)) + + # global sessions + # sessions = [init_session_workers(model_path, use_cuda) for _ in range(num_cpus)] + + tasks = [(i % mp_workers, sentences[i]) for i in range(len(sentences))] + + print("tasks initialized...") + print(tasks) + + def worker_task(task): + session_index, sentence = task + + global sessions + + session = sessions[session_index] + + # session = list(sessions)[session_index] # this is the ONNX session I need to use for inference + + print("session called for inference...") + # print(session) + + wavs = worker_onnx(sentence, speaker_id=speaker_id, model=session, vocoder_model=None, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"], + temperature=temperature, speaking_rate=speech_rate) + + return wavs + + with mp.Pool(processes=mp_workers) as pool: + pool.starmap(init_session_workers, [(model_path, sessions, i, use_cuda) for i in range(mp_workers)]) + + # preload all sessions according to the number of workers available (num. 
of cpu's) + # ort_sessions = [load_onnx_tts_unique(model_path=model_path, use_cuda=use_cuda) for _ in mp_workers] + + with mp.Pool(processes=mp_workers) as pool: + results = pool.map(worker_task, tasks) + + + ''' + worker_with_args = partial(worker_onnx_audio, speaker_id=speaker_id, model_path=model_path, + unique_model=unique_model, vocoder_path=vocoder_path, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, + temperature=temperature, speaking_rate=speech_rate) + + pool = mp.Pool(processes=mp_workers) + + results = pool.map(worker_with_args, [sentence.strip() for sentence in sentences if sentence]) + ''' + + list_of_results = [tensor.squeeze().tolist() for tensor in results] + # Close the pool to indicate that no more tasks will be submitted + pool.close() + # Wait for all processes to complete + pool.join() + merged_wavs = list(chain(*list_of_results)) + + out = io.BytesIO() + + save_wav(merged_wavs, out) + + return StreamingResponse(out, media_type="audio/wav") + + +@route.websocket_route("/audio-stream") +async def stream_audio(websocket: WebSocket): + await websocket.accept() + + audio_queue = asyncio.Queue() + + try: + while True: + received_data = await websocket.receive_json() + + sentences = segmenter.segment(received_data.get("text")) + voice = received_data.get("voice") + + # create a separate task for audio generation + generator_task = asyncio.create_task(generate_audio(sentences, voice, audio_queue)) + + # create a task for audio playing + player_task = asyncio.create_task(play_audio(audio_queue, websocket)) + + # wait for both tasks to complete + await asyncio.gather(generator_task, player_task) + + except Exception as e: + traceback.print_exc() diff --git a/server/views/health/__init__.py b/server/views/health/__init__.py new file mode 100644 index 0000000..14edb53 --- /dev/null +++ b/server/views/health/__init__.py @@ -0,0 +1,32 @@ +from fastapi import APIRouter, status +from pydantic import BaseModel + + +health = APIRouter(prefix='/health') + +class HealthCheck(BaseModel): + """Response model to validate and return when performing a health check.""" + + status: str = "OK" + + +@health.get( + "/", + tags=["healthcheck"], + summary="Perform a Health Check", + response_description="Return HTTP Status Code 200 (OK)", + status_code=status.HTTP_200_OK, + response_model=HealthCheck, +) +def get_health() -> HealthCheck: + """ + ## Perform a Health Check + Endpoint to perform a healthcheck on. This endpoint can primarily be used Docker + to ensure a robust container orchestration and management is in place. Other + services which rely on proper functioning of the API service will not deploy if this + endpoint returns any other HTTP status code except 200 (OK). 
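+
+    A minimal external liveness probe (editorial sketch; the host and port
+    are assumptions and depend on how the container is published):
+
+        import requests
+
+        resp = requests.get("http://localhost:8000/health/")
+        assert resp.status_code == 200
+        assert resp.json() == {"status": "OK"}  # body produced by the HealthCheck model
+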
+ Returns: + HealthCheck: Returns a JSON response with the health status + """ + + return HealthCheck(status="OK") \ No newline at end of file diff --git a/server/views/health/__pycache__/__init__.cpython-310.pyc b/server/views/health/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faa4ac78eab71f881dad5d43a99c2cbf1b08a742 GIT binary patch literal 1379 zcmZuxJ#QR05Z$laJDolO8$pmF7{E=gh|WoA7=aVnh!Z2yK~kD+2x>WZOL=#>A-R%I zt=+1~&q$wtl5X2n=~Jo7kh~}gU>=rBa>+Mu<~^R;-JM3%{{HoR`9~T>f4a%THlfK2 zddvlNA`)>OHE|I)Ns+{Ep44fR6MFSNQ^}OArd)e@6#g3(dAf-#bgzW zEiuL9a)Pds zN5l4Ok9SmI`S#}3DVk2T2BC4$z$&=nwG`YU@K%6E+v^stD{P@dy;Qo9?Fu*qQcyaS z!y!3&qaBOm*?5>R*761!8>WmAYG1qY4r9Ocyxx2nGbU8Ym_Ib6#_D08G2XU{Djp)@8+Yv(V}iOKaxVnlE`L=LYo^>iLz#YqP1^oH5x- z%h+MJ);Zyg=`n6Fjk5S4@oRVSeAo^N`Fw6ZS%g$6&rmD~)q>Mdj@Res3pn#)!4cIB zzJB@?zBqX^qw;2!ugowuebaEg9vY_ImO4V8}ThyR56_nKVqIyF$yyYuw zkXTR{N#Hdv@eodIwE=W0$`TEkPL>rpkm4#wLJ?AP-qBYJcf2|oW=eY(97BKKhuZNKCKPY6g`kf-e(8(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!H$enx(7s(xZY zYG#q4en4e%Mq*KJKv8~HYH~@jeo0BOZel^EesOA1S!$7fd45rLYEiL%e0*kJW=VX! ZUP0w84x8Nkl+v73JCMP}OhAH#0RU0IT#C`@iaFTVv~1|yj!8p?whK|UC5T)wAFb$@80!ERGhaEQ$Mx1{Ry?hG52&0dTnJeISb0UpHvQLNsGYe+qct%rG zy26qMv$x1TWzIeoG__fcxd$dQR*YF4=1y8m-Lx+2tj-z-bOzGAacIn&&|8Pq9#hs+ zk2P0}E%N+?Swpnbj%>{RL)5p^1=a%lhP3#1(v}3yZwf;;@n$)dWY&~*S=l(Uwnf2W z5iLY}z9eA{zTm7FhjiY>vknRKezj&TaYj=KLH)&C19`(ZGVeUbgB}xrBcjYK&W}tON}()>)L1z@ zE~l9|GPn<0o~{o@#W)^>lQ`j526y+OQOL)4d6CAECL)i$BB#I>s! zNpKTfAuRFz9FwN+DYuLvBv78J0bxNm1_@L47~QPA;|B!Qv^g!~Ak31m1Rr$rcqe#n zQmQ883!EE@DUX$>SCV`y07cA!jtt&IcmqKB+H0q*`9)mA6suqXcQsEX#8ps~?*h11 z+kteUKBNpK)}Pd}{^&YBd4(>6)+G%4I#}7d7Gd=@M(hecICI zW9ewZ=|JUoo)Jz*da|-{WgQ5|PrGO#{P`IPYw!hE6V5WMECJzkfN=b}AdcIFqnj5 zq+CqIXv)IA$-jhU)jCO3T;^Y)*)jswy)G7h4y1ehRh*wkcn#rogbN670`$-EKFV() zAfIvz;cbL>5U@=7WrS6P0RrYEzk+}%T7BypO4kwILqMX_HOb#c=>voh5pE*rI$Yo% zp}dArJ*fHxtO}2RjrvazZXxLReTvd&2)ejFMrj?Phk#|q@u%{om9_8>|9=l#OT3S3 zR`t~)4q2rCx6WwIclm8FM@C^#@gf-mB>@pw_&1>PoOY!44Z1}wml01Uq6CWHj5$(} xrhe^^%JVt~Sikz{Qz@mBOLK&~25B+P;&qI4iJou`m$XcmxTH%cbuYF0e*-X*K`a0O literal 0 HcmV?d00001 diff --git a/server/workers/workers.py b/server/workers/workers.py new file mode 100644 index 0000000..53ba33c --- /dev/null +++ b/server/workers/workers.py @@ -0,0 +1,142 @@ +import datetime +import re +# from lingua_franca.format import nice_time +# from lingua_franca.time import default_timezone +from text import text_to_sequence +import torch +import numpy as np +from scripts.inference_onnx import write_wav, load_onnx_tts, load_onnx_tts_unique + +''' +def worker(sentence, speaker_id, model, use_aliases, new_speaker_ids): + def substitute_time(sentence): + # Regular expression to find time pattern (HH:MM) + time_pattern = re.compile(r'((?<=\s)\d{1,2}):(\d{2}(?=\s))') + + # Find all matches of time pattern in the sentence + matches = re.findall(time_pattern, sentence) + + if not matches: + return sentence + + sentence = re.sub(r'les\s+', '', sentence, count=1) + + # Iterate through matches and substitute with formatted time string + for match in matches: + H = int(match[0]) + M = int(match[1]) + dt = datetime.datetime(2017, 1, 31, H, M, 0, tzinfo=default_timezone()) # Using UTC timezone for simplicity + formatted_time = nice_time(dt, lang="ca", use_24hour=True) # Assuming you have a function to format time in Catalan + sentence = sentence.replace(f'{match[0]}:{match[1]}', formatted_time) + + return sentence + + sentence = substitute_time(sentence) + + print(" > Model input: {}".format(sentence)) + print(" > Speaker Idx: 
{}".format(speaker_id)) + + if use_aliases: + input_speaker_id = new_speaker_ids[speaker_id] + else: + input_speaker_id = speaker_id + + wavs = model.tts(sentence, input_speaker_id) + + return wavs +''' + + +def worker_onnx(sentence, speaker_id, model, vocoder_model, use_aliases, new_speaker_ids, temperature, speaking_rate): + + global sessions + + def intersperse(lst, item): + # Adds blank symbol + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + print(" > Model input: {}".format(sentence)) + print(" > Speaker Idx: {}".format(speaker_id)) + + x = torch.tensor( + intersperse(text_to_sequence(sentence, ["catalan_cleaners"]), 0), + dtype=torch.long, + device="cpu", + )[None] + + x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True) + x = x.detach().cpu().numpy() + + x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device="cpu") + x_lengths = np.array([x_lengths.item()], dtype=np.int64) + + if use_aliases: + input_speaker_id = new_speaker_ids[speaker_id] + print(input_speaker_id) + else: + input_speaker_id = speaker_id + + inputs = { + "x": x, + "x_lengths": x_lengths, + "scales": np.array([temperature, speaking_rate], dtype=np.float32), + "spks": np.repeat(input_speaker_id, x.shape[0]).astype(np.int64) + } + + return write_wav(model, inputs=inputs, output_dir='', external_vocoder=vocoder_model) + + +def worker_onnx_audio(sentence, speaker_id, model_path, unique_model, vocoder_path, use_aliases, new_speaker_ids, + use_cuda, temperature, speaking_rate): + + def intersperse(lst, item): + # Adds blank symbol + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + print(" > Model input: {}".format(sentence)) + print(" > Speaker Idx: {}".format(speaker_id)) + + if unique_model: + model = load_onnx_tts_unique(model_path=model_path, use_cuda=use_cuda) + vocoder_model = None + else: + model, vocoder_model = load_onnx_tts(model_path=model_path, vocoder_path=vocoder_path, use_cuda=use_cuda) + + x = torch.tensor( + intersperse(text_to_sequence(sentence, ["catalan_cleaners"]), 0), + dtype=torch.long, + device="cpu", + )[None] + + x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True) + x = x.detach().cpu().numpy() + + x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device="cpu") + x_lengths = np.array([x_lengths.item()], dtype=np.int64) + + if use_aliases: + input_speaker_id = new_speaker_ids[speaker_id] + print(input_speaker_id) + else: + input_speaker_id = speaker_id + + inputs = { + "x": x, + "x_lengths": x_lengths, + "scales": np.array([temperature, speaking_rate], dtype=np.float32), + "spks": np.repeat(input_speaker_id, x.shape[0]).astype(np.int64) + } + + ''' + inputs = { + "model1_x": x, + "model1_x_lengths": x_lengths, + "model1_scales": np.array([temperature, speaking_rate], dtype=np.float32), + "model1_spks": np.repeat(input_speaker_id, x.shape[0]).astype(np.int64) + } + ''' + return write_wav(model, inputs=inputs, output_dir='', external_vocoder=vocoder_model) diff --git a/text/__init__.py b/text/__init__.py new file mode 100644 index 0000000..f9adb0f --- /dev/null +++ b/text/__init__.py @@ -0,0 +1,53 @@ +""" from https://github.com/keithito/tacotron """ +from text import cleaners +from text.symbols import symbols + +# Mappings from symbol to numeric ID and vice versa: +_symbol_to_id = {s: i for i, s in enumerate(symbols)} +_id_to_symbol = {i: s for i, s in enumerate(symbols)} # pylint: disable=unnecessary-comprehension + + +def text_to_sequence(text, cleaner_names): + """Converts a string of text to a sequence of IDs 
corresponding to the symbols in the text. + Args: + text: string to convert to a sequence + cleaner_names: names of the cleaner functions to run the text through + Returns: + List of integers corresponding to the symbols in the text + """ + sequence = [] + + clean_text = _clean_text(text, cleaner_names) + for symbol in clean_text: + symbol_id = _symbol_to_id[symbol] + sequence += [symbol_id] + return sequence + + +def cleaned_text_to_sequence(cleaned_text): + """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. + Args: + text: string to convert to a sequence + Returns: + List of integers corresponding to the symbols in the text + """ + sequence = [_symbol_to_id[symbol] for symbol in cleaned_text] + return sequence + + +def sequence_to_text(sequence): + """Converts a sequence of IDs back to a string""" + result = "" + for symbol_id in sequence: + s = _id_to_symbol[symbol_id] + result += s + return result + + +def _clean_text(text, cleaner_names): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception("Unknown cleaner: %s" % name) + text = cleaner(text) + return text diff --git a/text/__pycache__/__init__.cpython-310.pyc b/text/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc4d39aad8465c13d7ce5f527f8241f8303f65b9 GIT binary patch literal 2089 zcmd5-Pj4eN6t_K-Wb&`uZC8bg%Loai(JqDWz%X2yx_ zEZb;uq5T5Dr`WIL6A~xR+&J-`Ct;Jo0VEDsvgi5Pet&=OjqLY31V;7p>+2tzg#3b= z&Edi36PTLA!pQ~Ul(Pvf*af3VdlRqlFMObV-nb{CA^bafM(*fElQ(xbE#Bh6J-G;Y zn|I*b=3U-{Z-@8!9(=p}!8wWc7mvehC5v!u%v2BKc$Ay*{A!TNBEAto=SIdR&7@JX z41Wc)NJMRrO+;D>rGakg+u}-2bVl)THP8_s<};Z32$q~wj1 z6NR^4dzKBJTAEv`9m6fZe-w{pA>wo@a`j>S;x-$ns(7K~w<0q-Hb#GtPV?A^S0+x9 zyv$9K45qiXbHek?fCJN0h%xz%!E!_tM*d&eNGn#`_{JdB01+G8Hf~XD3yCf)8R}XO zRE}|QF&F?bB1>i<4i&v)ORw_I$Ypow8(R6tWZ8hIG)DAexHD^45SxgmPavevWcgAk zqr)`RM&;!wl-D8p92$u{F`J7r6UhDiOoy3NO6aL9IbMY;W-P*0hQhpbTYNp}gfKo; zBRzCf=wuGpI^bbeE8bSKy1Pn$Ql^E_!_a-4U7&1r7GBTG%;d7vC_&BNs0JI9oR7v% z)|bM}Rr#k8U*y`Lpu99<1o-;L{y1R~v)#n$PtZr(PioH-Ba@s*P3za}E%R-#j-l;r zylvJ`ZF?mm=R29V`zQ=WuiqmSbPpCk=&*nWv`2la9sy$ucZd<34i^VNV3FSgF0hc7 zw4#uEZ}Id0F!BFsvi=ei>;&g9HAZZN{i3;~P%reD{NTEUOF%?bAI>ADu)Y+=Z#OZW zlL^4cHGp^FSYxmsk;Rib?c8BwGjD^FiOkZ8J_Ta^u-3(FZ}XAb{~K*@Z{FAx`wr?x zr}kO^gSUX#av4uNm<|SLC&%Oi2rfIWO)zK*UPUVpS^}N<4i;?jcoP;owSSdnH>+}W zox)|V>rI$=ZoC~FaZs=FprJU%5;^OK-M_d>$VCW;5Qc{qMy|3AlE?L0}`U9J} z1M-?E-1XWiS?^g%(~@e-u+#5fgjsIFT$p$|eI9)Prx1Vg8w{c_C;!5FTCmFc zH>Ro@%0XAEsks*0K%xy(!eClIs2U?=qtgRoM3&4zgbI4gmQLZEk&C0HYiQvflVuI! zRU6UwVbAQKV30H#KY?Jskoh-48STfuHY&|WzP$3$W8X;RiP>D_i9qhhXWCDsQbJE< z&T%VjF=OGca_6VH{l(M0mJiQ|YNY#?@~zDNS_d3VD#hDsR(n^#i1IiSy6@YMwF{K3 z*8Hn^o|shT8YQUt8`WTglJn8n%KB26xyt`E;*(Sx6qM#hi~xE6*dHq_U}Zb9`XltQ z>_(O65#TB1LA`V<_Lljjw~pbkwQ;sy9W9$H5h>rve7TRpQ1tpdB20H+BA&}y)PuJ} z)gkK2F;u3FSWl7}@fFh|5@+izijmxkbii)_NxsnZy%)3dH(Y zrHKtYn}^i?-{^X0bH}FGcVV+e!$@O~s<+^~b@9DrP8zZDH~D5WfDbYySiC==(^Q7Jlx>)R{d0A0?Vez z=b2Ek5f+&0ZD87>w@J5AvI;iLvC+ocqx4QP$>$Ssir1<}fT2kp=YSqK-hHp_HU9v5 CL(l-KMNkVz@IRmtO9! 
[base85 GIT binary patch data for the committed text/__pycache__/*.pyc cache files omitted]
diff --git a/text/__pycache__/cleaners.cpython-39.pyc b/text/__pycache__/cleaners.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9c57c6ab67db62cb7abcd73f17a81f0c0010130
GIT binary patch
literal 4463
[base85 GIT binary patch data omitted; the text/numbers.py hunk resumes below mid-file]
+    if len(parts) > 2:
+        return match + " dollars"
+    dollars = int(parts[0]) if parts[0] else 0
+    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
+    if dollars and cents:
+        dollar_unit = "dollar" if dollars == 1 else "dollars"
+        cent_unit = "cent" if cents == 1 else "cents"
+        return f"{dollars} {dollar_unit}, {cents} {cent_unit}"
+    elif dollars:
+        dollar_unit = "dollar" if dollars == 1 else "dollars"
+        return f"{dollars} {dollar_unit}"
+    elif cents:
+        cent_unit = "cent" if cents == 1 else "cents"
+        return f"{cents} {cent_unit}"
+    else:
+        return "zero dollars"
+
+
+def _expand_ordinal(m):
+    return _inflect.number_to_words(m.group(0))
+
+
+def _expand_number(m):
+    num = int(m.group(0))
+    if num > 1000 and num < 3000:
+        if num == 2000:
+            return "two thousand"
+        elif num > 2000 and num < 2010:
+            return "two thousand " + _inflect.number_to_words(num % 100)
+        elif num % 100 == 0:
+            return _inflect.number_to_words(num // 100) + " hundred"
+        else:
+            return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ")
+    else:
+        return _inflect.number_to_words(num, andword="")
+
+
+def normalize_numbers(text):
+    text = re.sub(_comma_number_re, _remove_commas, text)
+    text = re.sub(_pounds_re, r"\1 pounds", text)
+    text = re.sub(_dollars_re, _expand_dollars, text)
+    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
+    text = re.sub(_ordinal_re, _expand_ordinal, text)
+    text = re.sub(_number_re, _expand_number, text)
+    return text
diff --git a/text/symbols.py b/text/symbols.py
new file mode 100644
index 0000000..7018df5
--- /dev/null
+++ b/text/symbols.py
@@ -0,0 +1,17 @@
+""" from
https://github.com/keithito/tacotron + +Defines the set of symbols used in text input to the model. +""" +_pad = "_" +_punctuation = ';:,.!?¡¿—…"«»“” ' +_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" +_letters_ipa = ( + "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" +) + + +# Export all symbols: +symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa) + +# Special symbol ids +SPACE_ID = symbols.index(" ") From e674b08eb98977e49bef889848d20f48703393f2 Mon Sep 17 00:00:00 2001 From: Ankush Rana Date: Tue, 6 Aug 2024 14:51:16 +0200 Subject: [PATCH 02/19] add gitignore --- .gitignore | 6 ++++++ __pycache__/main.cpython-310.pyc | Bin 3830 -> 0 bytes __pycache__/main_alex.cpython-310.pyc | Bin 3048 -> 0 bytes 3 files changed, 6 insertions(+) create mode 100644 .gitignore delete mode 100644 __pycache__/main.cpython-310.pyc delete mode 100644 __pycache__/main_alex.cpython-310.pyc diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5c9ff48 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +*.wav +*.pth +*.json +models +venv/ +__pycache__ \ No newline at end of file diff --git a/__pycache__/main.cpython-310.pyc b/__pycache__/main.cpython-310.pyc deleted file mode 100644 index feea6255c357618f85f5ad67887739e1512a62d3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3830 zcmZ`*&2JpH6(8=*&aPG?X|?+BhpcEM`D<c}tvG?*;^ibs1Lr-ZAeUG!d8ae3<_((oJ@;yHO z-XoKlnVJFLUtj!s{~ybS@lWg&zgg&f1Vvmf83r{diHrg92*GwKDh*7})NM1eJPUYc zRQAfcwmn6-^Hu&(U+s(ad1ho7HVQnj6%;Ix!ew^RG$2K`Sc; zt*#i}0;}~GP8Qk1De;z=$rfG{Z<$Vc7c~DOTVfYz?bP%x75dZAzf2uE^QGafu$9-w z$(2*XTV?OiS+@GR1R70m4T=k89m)ojO}5&<%74d}Pp+OCV8_(SHC?avuTyhK_*XE# zCcc948_>FbN`3=tc&>L7dQFfp_@CKLI(Ny4OUPlXJ`_Dd_}9?d5MPhxHuc;MZ_~R4 ze6Ryn{{~y$8c^4Bfv$V&h5giFKevGkc7p8Ru;pz4-PBfZ(fLD@|DDwt&fLQNF^8=| zZ=G$hO@=eKptPX0p>&|!VXL(9#_{gb1-b}c+Gh9Y(qZWjgika3-_z~lo(;nbTqTLrsoXxk}oxI;U3Z?M7K{mU` zg!DDDoyuM|oiCu}yJ*$;0|XmWP#!@MJpyWFL8YX$CgS&_D^o=ly>UPtu} zdh<y~$!U6KiI5EH!as;eqX{Tu_OY$L8)}x#o#kA=ew;=FBz~- zkg|~9?d%?RdjTKp@}$qYQgoygtso6MgCLCC>9LvvQ~;c$zn3Ht3*rsbjYc>mYmTAG z57K}G?%MaGq#H!yLHn&)*-Wbyg=4r0MLvA~B-=)g8yp2;6zoOJO*v~x4tg1N!3;Nu zse6=k0ma-MVYyj`9UiAlmE(XP2vr6>LaGX7`@t}h%IdKw&74+C%lmOKVA)kzD~Gx0 zu(%WTMao#W*O5UtkvxgFTaO<<%3kb%tefn+Nvtc``=LR(5pEc}`w1Te(!C!A@xc%x z^+D%8#9$zpgw~nv2ThaDgRl4^k|iM7Y^yceQ*PaDwDB#yoPnnWc;T(xqZjia^;5_e z*=5Wbu1s?8xcQK~A4aSVinUIyT`QhAMNeHgKUL2u+NOp6I2Jl*N3OvA#l9O#HxzE1 zNEiH%$F?=HWGI+$KmL^)i18NM?ylaGI|vS$D~6o8vIibKPKMx5A%iF?>^BPcc)*X{ zJqC_u?#<7FBlhXeP7}8^*^%C0wshvtqH8vO-1MT0vc*;lpaAx8;iCg`r*8c6*(W=C zk9f-N0ALl473kXhwUTmM-DyKb1C$+fd; zs|Ako2Whs6QHw*+cXy9vFNt%-DNnjgh%i2AH_HVmH4sk=6HwLimAXS3Wb?(f9=&+{ zu&v`E^tdHtq_OJ8C8W0qSKa|eaH`n0~X6& zoyC+BQ0l{|<|$K1uBJxd>ISNs$2YXhFqXUw>#EWq1(9?Jm+&x_s=N=zKsLghsg{18 zXUTjpkb>NEM0;!T}*&?OFZ+~Z1$9?YGq3C}mp07YD@)*4)XecrAW_PcgV^F{G7+(XZ?r zeh@O1;ZfL4c&to5jQKXK$?qX~7s-1_@DeT%ecTqV`zILCSL+qRe+q3?^?l^~{<2BM~m zFAa-*9?CH+wIQ=Kc^PA}$R7pcu~4$X86veL@%a|6+CtJs(m`?u$z34I1SCw33a9wK znuAAx4=L7{J$SfN0T2ep0SA3~ZlpN}?mT!^4HzVQT7ZK_hsHz|9}>feJ;1{kzX#&L zqpNB{0vP>H$srDDSje@^I;oSYUHhO$tdd3Y-!7wU-E5G{q+V)}nrRdK>!en~x@J10 ZG)WhjD#kN0#n}I;)@xRc*@bfeaW3MTBw^%w;l~?%f@7c4pk& z%f~=I$+v(XketY44*3)N1Kd}i4ET^+fWUxU)w9bT8wikIR84htbys!0db3W)(O~@W zufL6o75L($I$AJ!3cz3JnnpCDCz@Y=bzfI=!#C8|^ey$R`8D_&i9N0Rbsc5Rq%m#! 
zO`t8`AvM26Y`;zFeuvifyJtP>(AK%(uUyR9Fk8Ktbzs&f4buEZ^VjIwTkUN9T=O^R zd&Hp|Zw)wA_b&ll2G|7H0=NQj72q1(*oU>5&JK|BF9F{4Hy3At~tlDepo#s#Md97os{PD|9$krhr?G)RxERL)t2U461v;jF*W z$Od^2_FW>EPwlzBP2hGEcdNp^GMC*TS1a6Wq6vDggYUv;><`sXvzH3FVL|wIn;D3<&-BpBlsH{P*$ymnvbL24O;vhxv)@ zMOk`CnFwBISwh2f6AeI1+IrzF5|fSd5DJ40KS{DEO!%k6-|Q;dgMrGxAP>c)xV#J4 z6`A{TXXh`jT5v~kLWk-aCpndMLPy~&5z?B_Brm#y!C@9b3bXQn^D=&~WfA2(PRB#9RyqLhE^BGX#$47Q z5?m;s6wUdO5P{qO^E@$uZbR_6!YSpW83~Kk>W1fE{{Goe#WW9&%4YNx8;5l8_&` zYE8vJMT<|eqkvEm#tAR3d=4FrLC>7WsN}nOIHsj=PH8m3{zl38yKiy)^kfW5Vf_0^ zvHpA#reo^jdNffVGAufSfuPeI&z&(k{>R^OeE+1lwuh;)T*1r7>4-9#MxYMj$Y_#< zq*(o<`a%l0(04%r_pI{zYoJ$Uth%|^mJNuVDk<50#>TTLO~tONFxd_X31C(6qzR#w zjk2<$P&$hu0lEW?dYSan!upkFoC;=wHdz~iO+s#bR+jb49N&^KDc=;&VECj;5uuIRVtr8-`^Vdik?|w(KSC0Byf>VAX!tH~M-Pr0jQHH7{}7urDM^ zx$0_3TLyDMQPIWhc7L}RUQ;^D1|!=GM_{{=EAu9RZS=4&x|esWjOvWgZ;N9wK^#=zT=k(kn=X#=^rflU0wtsX7f#4Y(bYB#p5!#{du@6>CbMV)Y>8|s?xob9$_cIR{|46wlLIT5{H~}b{qDw zJBaQgx`*gKq6dJa2?4zDFy1K$WDj170NP+sHc}4R2aGWbNl^ALQgXNm;Z>bdXyv35 zMHN&P%El+)%2`4`#m>tA3aEkgrG9Orty`7_wPxvEqYJgvFr9&;<9zvZOsFSVbD*va Vv#iABDpcQp8l8sa=;hD)`4`vYVjTbg From 9304c438b6176bb3da626b2b38e7b09e87d3ce07 Mon Sep 17 00:00:00 2001 From: Ankush Rana Date: Tue, 6 Aug 2024 15:08:39 +0200 Subject: [PATCH 03/19] modify gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 5c9ff48..6ef005c 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,4 @@ *.json models venv/ -__pycache__ \ No newline at end of file +**/__pycache__/ \ No newline at end of file From 289dcf2680c76ae3303c5fe2c171012d0288b610 Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Tue, 6 Aug 2024 15:09:33 +0200 Subject: [PATCH 04/19] upload files --- models/matxa_onnx/spk_ids.json | 49 ++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 models/matxa_onnx/spk_ids.json diff --git a/models/matxa_onnx/spk_ids.json b/models/matxa_onnx/spk_ids.json new file mode 100644 index 0000000..69f2405 --- /dev/null +++ b/models/matxa_onnx/spk_ids.json @@ -0,0 +1,49 @@ +{ + "cam_03115": 0, + "caf_04247": 1, + "caf_05450": 2, + "cam_08935": 3, + "caf_09901": 4, + "ona": 5, + "pol": 6, + "cam_02689": 7, + "caf_06042": 8, + "jan": 9, + "caf_08106": 10, + "cam_04910": 11, + "cam_08664": 12, + "caf_07803": 13, + "cam_06582": 14, + "caf_06311": 15, + "caf_07245": 16, + "cam_06279": 17, + "caf_09598": 18, + "caf_09796": 19, + "eva": 20, + "cam_00762": 21, + "caf_09204": 22, + "caf_03944": 23, + "caf_05147": 24, + "uri": 25, + "mar": 26, + "cam_00459": 27, + "teo": 28, + "caf_03655": 29, + "bet": 30, + "cam_06705": 31, + "caf_05739": 32, + "caf_06008": 33, + "cam_04484": 34, + "cam_03386": 35, + "cam_08967": 36, + "caf_06942": 37, + "cam_07140": 38, + "pau": 39, + "caf_08001": 40, + "pep": 41, + "cam_04787": 42, + "eli": 43, + "caf_01591": 44, + "caf_02452": 45, + "cam_02992": 46 +} \ No newline at end of file From 0e208c718994275e89d3bf21ae0c3e38d853d3bb Mon Sep 17 00:00:00 2001 From: Ankush Rana Date: Wed, 7 Aug 2024 11:24:30 +0200 Subject: [PATCH 05/19] modify docker file --- .dockerignore | 36 +++++ .gitignore | 1 + Dockerfile | 11 +- Dockerfile.dev | 35 ----- Dockerfile.test | 30 +--- charts/aina-tts-api/Chart.yaml | 16 --- charts/aina-tts-api/templates/deployment.yaml | 35 ----- charts/aina-tts-api/templates/service.yaml | 15 -- charts/aina-tts-api/values.yaml | 6 - docker-compose-dev.yml | 18 --- docker-compose-gpu.yml | 4 +- docker-compose-test.yml | 2 +- docker-compose.yml | 11 +- main.py | 134 ++++++------------ 
main_alex.py | 111 --------------- requirements.txt | 18 +-- .../inference_onnx.cpython-310.pyc | Bin 6077 -> 0 bytes server/__pycache__/__init__.cpython-310.pyc | Bin 1253 -> 0 bytes .../exception_handler.cpython-310.pyc | Bin 1043 -> 0 bytes server/__pycache__/exceptions.cpython-310.pyc | Bin 725 -> 0 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 160 -> 167 bytes .../__pycache__/audio_utils.cpython-310.pyc | Bin 2507 -> 2514 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 155 -> 162 bytes .../helper/__pycache__/config.cpython-310.pyc | Bin 1605 -> 1612 bytes .../__pycache__/singleton.cpython-310.pyc | Bin 663 -> 670 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 156 -> 163 bytes .../tts_request_model.cpython-310.pyc | Bin 752 -> 759 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 154 -> 161 bytes .../__pycache__/argparse.cpython-310.pyc | Bin 769 -> 776 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 154 -> 161 bytes .../api/__pycache__/__init__.cpython-310.pyc | Bin 158 -> 165 bytes .../views/api/__pycache__/api.cpython-310.pyc | Bin 6186 -> 6193 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 1379 -> 1386 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 156 -> 163 bytes .../__pycache__/workers.cpython-310.pyc | Bin 2329 -> 2336 bytes text/__pycache__/__init__.cpython-310.pyc | Bin 2089 -> 2096 bytes text/__pycache__/__init__.cpython-39.pyc | Bin 2126 -> 0 bytes text/__pycache__/cleaners.cpython-310.pyc | Bin 4145 -> 4152 bytes text/__pycache__/cleaners.cpython-39.pyc | Bin 4463 -> 0 bytes text/__pycache__/symbols.cpython-310.pyc | Bin 728 -> 735 bytes text/__pycache__/symbols.cpython-39.pyc | Bin 754 -> 0 bytes 41 files changed, 107 insertions(+), 376 deletions(-) create mode 100644 .dockerignore delete mode 100644 Dockerfile.dev delete mode 100644 charts/aina-tts-api/Chart.yaml delete mode 100644 charts/aina-tts-api/templates/deployment.yaml delete mode 100644 charts/aina-tts-api/templates/service.yaml delete mode 100644 charts/aina-tts-api/values.yaml delete mode 100644 docker-compose-dev.yml delete mode 100644 main_alex.py delete mode 100644 scripts/__pycache__/inference_onnx.cpython-310.pyc delete mode 100644 server/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/__pycache__/exception_handler.cpython-310.pyc delete mode 100644 server/__pycache__/exceptions.cpython-310.pyc delete mode 100644 text/__pycache__/__init__.cpython-39.pyc delete mode 100644 text/__pycache__/cleaners.cpython-39.pyc delete mode 100644 text/__pycache__/symbols.cpython-39.pyc diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..e5341f8 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,36 @@ +# Ignore the virtual environment directory +venv + +# Ignore Python cache files +__pycache__ + +# Ignore environment and dependency files +*.env +*.log +*.tmp + +# Ignore configuration and metadata files +CODE_OF_CONDUCT.md +CONTRIBUTING.md +docker-compose*.yml +LICENSE.txt +Makefile +README.md +SECURITY.md + +# Ignore specific scripts and files +infer_wavenext_onnx.py +_main.py + +# Ignore any other .md files +*.md + +# Ignore any other temporary or unnecessary files +*.swp +*.bak +*.tmp +*.orig + +# Exclude specific files or directories if needed for the Docker build +# !important_file.py +# !important_directory/ \ No newline at end of file diff --git a/.gitignore b/.gitignore index 6ef005c..65b5853 100644 --- a/.gitignore +++ b/.gitignore @@ -2,5 +2,6 @@ *.pth *.json models +!models/matxa_onnx/spk_ids.json venv/ **/__pycache__/ \ No newline at end 
of file diff --git a/Dockerfile b/Dockerfile index aa271f9..b4348a0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,7 @@ RUN apt-get update && apt-get install -y \ autoconf \ automake \ libtool \ + espeak \ pkg-config \ git \ wget \ @@ -22,15 +23,13 @@ RUN pip install --upgrade pip && \ make && \ make install -RUN pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c +RUN which espeak WORKDIR /app COPY ./requirements.txt /app -RUN python -m pip install --upgrade pip -RUN python -m pip install --no-cache-dir -r requirements.txt +RUN pip install --upgrade pip +RUN pip install --no-cache-dir -r requirements.txt -RUN wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P /app/models/vits_ca/ -RUN mv /app/models/vits_ca/best_model_8khz.pth /app/models/vits_ca/best_model.pth COPY . . -ENTRYPOINT python main.py --speech_speed ${SPEECH_SPEED} --mp_workers ${MP_WORKERS} --use_cuda ${USE_CUDA} --use_mp ${USE_MP} +ENTRYPOINT python main.py --speech_speed ${SPEECH_SPEED} --use_cuda ${USE_CUDA} diff --git a/Dockerfile.dev b/Dockerfile.dev deleted file mode 100644 index 2b5a7cd..0000000 --- a/Dockerfile.dev +++ /dev/null @@ -1,35 +0,0 @@ -FROM python:3.10.12-slim -# RUN apt-get update && apt-get install -y --no-install-recommends wget gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/* - -# Install required packages for building eSpeak and general utilities -RUN apt-get update && apt-get install -y \ - build-essential \ - autoconf \ - automake \ - libtool \ - pkg-config \ - git \ - wget \ - cmake \ - && rm -rf /var/lib/apt/lists/* - -RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng - -RUN pip install --upgrade pip && \ - cd espeak-ng && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make && \ - make install - -RUN pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c - -WORKDIR /app -# RUN wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P /app/models/vits_ca/ -# RUN mv /app/models/vits_ca/best_model_8khz.pth /app/models/vits_ca/best_model.pth - -COPY ./requirements.txt /app -RUN python -m pip install --upgrade pip -RUN python -m pip install --no-cache-dir -r requirements.txt - -ENTRYPOINT python main.py --speech_speed ${SPEECH_SPEED} --mp_workers ${MP_WORKERS} --use_cuda ${USE_CUDA} --use_mp ${USE_MP} --show_details True --reload \ No newline at end of file diff --git a/Dockerfile.test b/Dockerfile.test index 1f840aa..4fd10d9 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -1,34 +1,10 @@ FROM python:3.10.12-slim -# RUN apt-get update && apt-get install -y --no-install-recommends wget gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/* - -# Install required packages for building eSpeak and general utilities -RUN apt-get update && apt-get install -y \ - build-essential \ - autoconf \ - automake \ - libtool \ - pkg-config \ - git \ - wget \ - cmake \ - && rm -rf /var/lib/apt/lists/* - -RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng - -RUN pip install --upgrade pip && \ - cd espeak-ng && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make && \ - make install - -RUN pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c WORKDIR /app COPY ./requirements.txt /app -RUN python -m pip install --upgrade 
pip -RUN python -m pip install --no-cache-dir -r requirements.txt -RUN pip install pytest httpx pydub pytest-repeat +RUN pip install --upgrade pip +RUN pip install --no-cache-dir -r requirements.txt +RUN pip install pytest httpx pydub pytest-repeat --no-cache-dir ENTRYPOINT pytest diff --git a/charts/aina-tts-api/Chart.yaml b/charts/aina-tts-api/Chart.yaml deleted file mode 100644 index 51a3af3..0000000 --- a/charts/aina-tts-api/Chart.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v2 -name: aina-tts-api -version: 0.1.0 -description: RestFUL api and web interface to serve coqui TTS models - -home: https://github.com/projecte-aina/tts-api -keywords: - - ai - - tts - - coqui - - cat -maintainers: - - name: Projecte Aina - email: aina@bsc.es -sources: - - https://github.com/projecte-aina/tts-api diff --git a/charts/aina-tts-api/templates/deployment.yaml b/charts/aina-tts-api/templates/deployment.yaml deleted file mode 100644 index ad03f62..0000000 --- a/charts/aina-tts-api/templates/deployment.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tts-api - namespace: {{.Values.global.namespace}} - labels: - component: tts-api -spec: - selector: - matchLabels: - component: tts-api - template: - metadata: - labels: - component: tts-api - spec: - volumes: - - name: dshm - emptyDir: - medium: Memory - sizeLimit: {{.Values.api.dshm_size | default "2Gi" | quote }} - containers: - - name: api - image: {{.Values.api.image}}:{{.Values.api.tag}} - imagePullPolicy: {{ default "IfNotPresent" .Values.pullPolicy }} - ports: - - containerPort: 8000 - env: - - name: "SPEECH_SPEED" - value: {{.Values.api.speech_speed | default "1.0" | quote }} - - name: "MP_WORKERS" - value: {{.Values.api.mp_workers | default "4" | quote }} - volumeMounts: - - mountPath: /dev/shm - name: dshm \ No newline at end of file diff --git a/charts/aina-tts-api/templates/service.yaml b/charts/aina-tts-api/templates/service.yaml deleted file mode 100644 index 01476ec..0000000 --- a/charts/aina-tts-api/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: tts-api - namespace: {{.Values.global.namespace}} - labels: - component: tts-api -spec: - ports: - - name: api - protocol: TCP - port: 8000 - targetPort: 8000 - selector: - component: tts-api diff --git a/charts/aina-tts-api/values.yaml b/charts/aina-tts-api/values.yaml deleted file mode 100644 index 3f49414..0000000 --- a/charts/aina-tts-api/values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -global: - namespace: apps - -api: - image: projecteaina/tts-api - tag: latest diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml deleted file mode 100644 index bab2de2..0000000 --- a/docker-compose-dev.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: '3.9' -services: - server: - build: - context: . - dockerfile: Dockerfile.dev - container_name: fastapi-dev - environment: - - SPEECH_SPEED=${SPEECH_SPEED-1.0} - - MP_WORKERS=${MP_WORKERS-4} - - USE_MP=False - restart: always - volumes: - - .:/app - - ports: - - '8080:8000' - shm_size: ${SHM_SIZE-2gb} diff --git a/docker-compose-gpu.yml b/docker-compose-gpu.yml index c434532..1074a5f 100644 --- a/docker-compose-gpu.yml +++ b/docker-compose-gpu.yml @@ -5,12 +5,10 @@ services: context: . 
environment: - SPEECH_SPEED=${SPEECH_SPEED} - - MP_WORKERS=${MP_WORKERS} - USE_CUDA=True - - USE_MP=${USE_MP} restart: unless-stopped ports: - - '8080:8000' + - '8000:8000' shm_size: ${SHM_SIZE} deploy: resources: diff --git a/docker-compose-test.yml b/docker-compose-test.yml index 139c009..63f1504 100644 --- a/docker-compose-test.yml +++ b/docker-compose-test.yml @@ -10,5 +10,5 @@ services: - .:/app ports: - - '8080:8000' + - '8000:8000' shm_size: ${SHM_SIZE-2gb} diff --git a/docker-compose.yml b/docker-compose.yml index 59a5fea..5a81d57 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,11 +3,10 @@ services: server: build: context: . - environment: - - SPEECH_SPEED=${SPEECH_SPEED} - - MP_WORKERS=${MP_WORKERS} - - USE_MP=${USE_MP} restart: unless-stopped ports: - - '8080:8000' - shm_size: ${SHM_SIZE} + - '8000:8000' + shm_size: ${SHM_SIZE:-2g} + environment: + - SPEECH_SPEED=${SPEECH_SPEED:-0.9} # Default to 0.9 if SPEECH_SPEED is not set + - USE_CUDA=${USE_CUDA:-0} # Default to 0 if USE_CUDA is not set diff --git a/main.py b/main.py index 8ab7740..fcca449 100644 --- a/main.py +++ b/main.py @@ -1,89 +1,67 @@ -from pathlib import Path -from TTS.utils.manage import ModelManager -# from lingua_franca import load_language # Lingua franca - import argparse import uvicorn import torch import multiprocessing as mp -import sys +from pathlib import Path import os from server import create_app from server.utils.argparse import MpWorkersAction -from server.utils.utils import update_config -# Set global paths -# Determine the current script's directory and set up paths related to the model -path = Path(__file__).parent / "server" /".models.json" -path_dir = os.path.dirname(path) - -# Initialize the model manager with the aforementioned path -manager = ModelManager(path) +path_dir = os.path.dirname(Path(__file__)) # Set the relative paths for the default TTS model and its associated configuration -models_path_rel = '../models/vits_ca' -model_ca = os.path.join(path_dir, models_path_rel, 'best_model.pth') -config_ca = os.path.join(path_dir, models_path_rel, 'config.json') +models_path_rel = f'{path_dir}/models/matxa_onnx' +model_name = 'matcha_wavenext.onnx' +vocoder_name = 'matcha_wavenext.onnx' +spk_ids_file = 'spk_ids.json' -# Load lingua franca language -# load_language('ca-es') +model_ca = os.path.join(models_path_rel, model_name) +vocoder_ca = os.path.join(models_path_rel, vocoder_name) +ids_file_path = os.path.join(models_path_rel, spk_ids_file) def create_argparser(): def convert_boolean(x): return x.lower() in ["true", "1", "yes"] - - # Create an argument parser to handle command-line arguments - # The parser setup seems incomplete and might be continued in the next section of the code. parser = argparse.ArgumentParser() - parser.add_argument( - "--list_models", - type=convert_boolean, - nargs="?", - const=True, - default=False, - help="list available pre-trained tts and vocoder models." - ) - parser.add_argument( - "--model_name", - type=str, - default="tts_models/en/ljspeech/tacotron2-DDC", - help="Name of one of the pre-trained tts models in format //", - ) - parser.add_argument("--vocoder_name", type=str, default=None, help="name of one of the released " - "vocoder models.") # Args for running custom models - parser.add_argument( - "--config_path", - default=config_ca, - type=str, - help="Path to model config file." 
- ) parser.add_argument( "--model_path", type=str, default=model_ca, - help="Path to model file.", + help="Path to ONNX model file.", ) parser.add_argument( "--vocoder_path", type=str, - help="Path to vocoder model file. If it is not defined, model uses GL as vocoder. Please make sure that you " - "installed vocoder library before (WaveRNN).", - default=None, + help="Path to ONNX vocoder", + default=vocoder_ca, ) - parser.add_argument("--vocoder_config_path", type=str, help="Path to vocoder model config file.", default=None) - parser.add_argument("--speakers_file_path", type=str, help="JSON file for multi-speaker model.", default=None) + parser.add_argument("--speakers_file_path", type=str, help="JSON file for multi-speaker model.", + default=ids_file_path) + parser.add_argument("--unique_model", type=bool, help="set to True if the model is a TTS+Vocoder", + default=True) parser.add_argument("--port", type=int, default=8000, help="port to listen on.") parser.add_argument("--host", type=str, default="0.0.0.0", help="host ip to listen.") - parser.add_argument("--use_mp", type=convert_boolean, default=False, nargs='?', const=True, help="true to use Python multiprocessing.") - parser.add_argument("--use_cuda", type=convert_boolean, default=False, nargs='?', const=False, help="true to use CUDA.") - parser.add_argument("--mp_workers", action=MpWorkersAction, type=int, default=mp.cpu_count(), nargs='?', const=mp.cpu_count(), help="number of CPUs used for multiprocessing") - parser.add_argument("--debug", type=convert_boolean, default=False, help="true to enable Flask debug mode.") - parser.add_argument("--show_details", type=convert_boolean, default=False, help="Generate model detail page.") - parser.add_argument("--speech_speed", type=float, default=1.0, nargs='?', const=1.0, help="Change speech speed.") - parser.add_argument("--reload", type=bool, action=argparse.BooleanOptionalAction, default=False, help="Reload on changes") + parser.add_argument("--use_mp", type=convert_boolean, default=False, nargs='?', const=True, + help="true to use Python multi-processing.") + parser.add_argument("--use_mth", type=convert_boolean, default=True, nargs='?', const=True, + help="true to use Python multi-threading.") + parser.add_argument("--use_cuda", type=convert_boolean, default=False, nargs='?', const=False, + help="true to use CUDA.") + parser.add_argument("--mp_workers", action=MpWorkersAction, type=int, default=1, # mp.cpu_count() + nargs='?', const=1, help="number of CPUs used for multiprocessing") + parser.add_argument("--debug", type=convert_boolean, default=False, + help="true to enable Flask debug mode.") + parser.add_argument("--show_details", type=convert_boolean, default=False, + help="Generate model detail page.") + parser.add_argument("--speech_speed", type=float, default=0.9, nargs='?', const=1.0, + help="Change speech speed.") + parser.add_argument("--temperature", type=float, default=0.4, nargs='?', const=1.0, + help="Set temperature of inference.") + parser.add_argument("--reload", type=bool, action=argparse.BooleanOptionalAction, default=False, + help="Reload on changes") return parser @@ -96,50 +74,28 @@ def convert_boolean(x): speakers_file_path = None vocoder_path = None vocoder_config_path = None -# new_speaker_ids = None -# use_aliases = None - -# CASE1: list pre-trained TTS models -if args.list_models: - manager.list_models() - sys.exit() - -# CASE2: load pre-trained model paths -if args.model_name is not None and not args.model_path: - model_path, config_path, model_item = 
manager.download_model(args.model_name) - args.vocoder_name = model_item["default_vocoder"] if args.vocoder_name is None else args.vocoder_name - -if args.vocoder_name is not None and not args.vocoder_path: - vocoder_path, vocoder_config_path, _ = manager.download_model(args.vocoder_name) +new_speaker_ids = None +use_aliases = None # CASE3: set custom model paths if args.model_path is not None: model_path = args.model_path - config_path = args.config_path speakers_file_path = args.speakers_file_path - speaker_ids_path = os.path.join(path_dir, models_path_rel, 'speaker_ids.json') + speaker_ids_path = os.path.join(models_path_rel, 'spk_ids.json') if args.vocoder_path is not None: vocoder_path = args.vocoder_path - vocoder_config_path = args.vocoder_config_path - -# CASE4: change speaker speed -if args.speech_speed != 1.0: - update_config(config_path, args.speech_speed) app = create_app( - model_path = model_path, - config_path = config_path, - speakers_file_path = speakers_file_path, - vocoder_path = vocoder_path, - vocoder_config_path = vocoder_config_path, - speaker_ids_path = speaker_ids_path, - speech_speed = args.speech_speed, - mp_workers = args.mp_workers, - use_cuda = args.use_cuda, - use_mp = args.use_mp, - show_details=args.show_details, + model_path=model_path, + vocoder_path=vocoder_path, + speaker_ids_path=speaker_ids_path, + speech_speed=args.speech_speed, + temperature=args.temperature, + mp_workers=args.mp_workers, + use_cuda=args.use_cuda, + use_mp=args.use_mp, args=args ) diff --git a/main_alex.py b/main_alex.py deleted file mode 100644 index af82e1c..0000000 --- a/main_alex.py +++ /dev/null @@ -1,111 +0,0 @@ -import argparse -import uvicorn -import torch -import multiprocessing as mp -import os - -from server import create_app -from server.utils.argparse import MpWorkersAction - - -# Set the relative paths for the default TTS model and its associated configuration -models_path_rel = '/home/apeir1/PycharmProjects/tts-api/models/matxa_onnx' -# model_name = 'matcha_multispeaker_cat_opset_15_10_steps_2399.onnx' -model_name = 'matcha_wavenext_simply.onnx' -# model_name = 'matxa_vocos_merged_HF_simplified_dynamic.onnx' -vocoder_name = 'mel_spec_22khz.onnx' -spk_ids_file = 'spk_ids.json' - -model_ca = os.path.join(models_path_rel, model_name) -vocoder_ca = os.path.join(models_path_rel, vocoder_name) -ids_file_path = os.path.join(models_path_rel, spk_ids_file) - - -def create_argparser(): - def convert_boolean(x): - return x.lower() in ["true", "1", "yes"] - parser = argparse.ArgumentParser() - # Args for running custom models - parser.add_argument( - "--model_path", - type=str, - default=model_ca, - help="Path to ONNX model file.", - ) - parser.add_argument( - "--vocoder_path", - type=str, - help="Path to ONNX vocoder", - default=vocoder_ca, - ) - parser.add_argument("--speakers_file_path", type=str, help="JSON file for multi-speaker model.", - default=ids_file_path) - parser.add_argument("--unique_model", type=bool, help="set to True if the model is a TTS+Vocoder", - default=True) - parser.add_argument("--port", type=int, default=8000, help="port to listen on.") - parser.add_argument("--host", type=str, default="0.0.0.0", help="host ip to listen.") - parser.add_argument("--use_mp", type=convert_boolean, default=False, nargs='?', const=True, - help="true to use Python multi-processing.") - parser.add_argument("--use_mth", type=convert_boolean, default=True, nargs='?', const=True, - help="true to use Python multi-threading.") - parser.add_argument("--use_cuda", 
type=convert_boolean, default=False, nargs='?', const=False, - help="true to use CUDA.") - parser.add_argument("--mp_workers", action=MpWorkersAction, type=int, default=1, # mp.cpu_count() - nargs='?', const=1, help="number of CPUs used for multiprocessing") - parser.add_argument("--debug", type=convert_boolean, default=False, - help="true to enable Flask debug mode.") - parser.add_argument("--show_details", type=convert_boolean, default=False, - help="Generate model detail page.") - parser.add_argument("--speech_speed", type=float, default=0.9, nargs='?', const=1.0, - help="Change speech speed.") - parser.add_argument("--temperature", type=float, default=0.4, nargs='?', const=1.0, - help="Set temperature of inference.") - parser.add_argument("--reload", type=bool, action=argparse.BooleanOptionalAction, default=False, - help="Reload on changes") - return parser - - -# parse the args -args = create_argparser().parse_args() -print("args =========", args) -# update in-use models to the specified released models. -model_path = None -config_path = None -speakers_file_path = None -vocoder_path = None -vocoder_config_path = None -new_speaker_ids = None -use_aliases = None - -# CASE3: set custom model paths -if args.model_path is not None: - model_path = args.model_path - speakers_file_path = args.speakers_file_path - speaker_ids_path = os.path.join(models_path_rel, 'spk_ids.json') - -if args.vocoder_path is not None: - vocoder_path = args.vocoder_path - - -app = create_app( - model_path=model_path, - vocoder_path=vocoder_path, - speaker_ids_path=speaker_ids_path, - speech_speed=args.speech_speed, - temperature=args.temperature, - mp_workers=args.mp_workers, - use_cuda=args.use_cuda, - use_mp=args.use_mp, - args=args - ) - - -def main(): - uvicorn.run('main_alex:app', host=args.host, port=args.port, reload=args.reload) - - -if __name__ == "__main__": - torch.set_num_threads(1) - torch.set_grad_enabled(False) - mp.set_start_method("fork") - main() diff --git a/requirements.txt b/requirements.txt index f491aa2..32a53aa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,11 @@ -# tts==0.17.6 - numpy -scipy -soundfile -torch -onnxruntime -fastapi==0.103.2 -uvicorn[standard] +scipy==1.14.0 +soundfile==0.12.1 +torch==2.4.0 +onnxruntime==1.18.1 +fastapi==0.112.0 +pysbd==0.3.4 +phonemizer==3.3.0 +piper_phonemize +unidecode +uvicorn[standard] \ No newline at end of file diff --git a/scripts/__pycache__/inference_onnx.cpython-310.pyc b/scripts/__pycache__/inference_onnx.cpython-310.pyc deleted file mode 100644 index 9935f7ecd87d073f0cdac948b833f357ae500ead..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6077 zcmcIoTaP1074Ey+ZF|Nu!FT1s{|L_%VL1PBl;oU#J&^kdXF?yn+PbJJoHEcN8A6)zwv}PMx}) z`szEKHX4C}-{|i@=sC|A#^0zg|Er?#DLnE|NSMLQzA-R&Oq23f-x}CEw$9sqXUB!i z?t42P-cG+V@OOM&=k}|EU?(W`YCCnU*Vt(=k5wKRI}6NbRlFD377JMIfw|LUb=H7n ziP`us{LEsDyuQE8ntX*V@fKTVcF$)kto6X#Sw*Ww9`M$tq3fy6W2f2DBHukKIEd;>87J3<6;|v%4be8E^od@I$w%q)-|a` z`Lpn*=M_AXA<2z>Lk37q8GHdtbl?20iIT;v16QsL=wZ&;%twLFyqHrn2N9$_jx!N$t=9b!=XqIW5!wd4!lU+`JGaAxLnzY$*UhptJlc!pK?qCXQFO?AsTjzsf+l$S;oH$**pHK4bTPo1 zMA=C2up1>ws{2UzZj{A`{Pfw|LmnNFB6QBjuq!x<7~o!%gx8gOn08aCDtmD+-i?ye zkKOErC+R30NghH0v*03$`XQ+?E}jdAeI7~vTquXU8~09Vq}{zRO?bPa92&lIXwIrO zH$2sVwjN7|#-i$_wayTGdtOSS0atE6J>sHmDJK%UQu$ErP{q53Si=1O_0qLHtkbn< z$Yb%swOc1xM=`i1(tX~|dfzdf_bto$&$7L2dS+m5SdJ-HP}#MSOn*-yQ^BKM>sdG%Gske6v6I_l zw`%0(gwCCv8?kw2y=jlVab@i1US45N?zfC}qh*Y%nK`j?_skpzxg~yxl8wj7jWZLn z+RR36;+`4gc%`lSxG`SH>v;pV7D`)-Xj##%tGaa)c|UJXstoog0rRwUX{NQP%gb78 
z`GVF8tBhOuQofS6TE>PkUWHaIUuFK8rStW?$*P+M(qPkoYz_5|oU|75wKKD2==@@l zM}0MSb7#X~?j>Wq4lO6g7$L3hH?cAsCyVeBUyS%-mnzU&*VvP1wQ~9y;T# zd@B#Ip7?*yVn)y2r1jg%FR}WDkvr(oT{p&;(XzF_y1$lhv3h>FXCZZU>L6Ww=#=f7 zteLOp>owTwIhzLdC77&Zg*Tvo2`gn!Hqo<-kv}oB_{0ZWoLo9HrnX?|$^1#Sbl^<3 z&Ws3l_Viv zCut02H|bC(!CpG-^m(!ipb9YOY2F@0yUILP=838i6qZ||ymDQ9*ap~P06sK3_u@z@ z@3?%Q6z}F-!$RVaq-ddYRZ5Gh2AMf8`q-#qR7QpeIILPv*ts9c3#Cw*Vxh~Ky}m5P&ax8oAkzezkK+|yWy)mA(BEo1V;kA zf^r4_Iel@ia>{eUo=69Csuvl^owg}H3G4-xv50Qk)7;0h}@r`LZd zc{fd%yb=Cuz3dcNNsVU}r}bh~;aBc_{ziD(c)ege)8cD~m`1-}6a`U+(`RmmoY*KV z5mTF;k#Gu1Pd|OSbv}eF9+1*`kMCNlG6Ys78F&sq28ZpW2O4_YRdJo>NW4Q3;m~J6 zT%%l@l8;eB_l0r?2P_utHDn0Llouy}cBz6A<58)a(?y)YoJa7KT^AoG69E-Uj9Z|* zo=FEQ12ZKsLRr~ZFSR@s%qyW{Y9N@C`y|(9Y?1X;g=|x%Hnl+okCb4&iUD_uIW!AQ zp0l)wyQDzB9xgmCRkcuofs2S$JrXh49jppWPJRW6Z&{{a^{ggHoo{;90=+FC53mTO z1xV>%ys2EbeA@#~P4#@_eQ2T;(xxR2VfHavSckyFqp|4^$YchP2sChF&XC9%yCltR zoJ%jmIVw++%}o3)I!&3A17ALC++D{hCE{pJT#!kS$903wBc1!qn^b@TJ`ube)XX&A zdMbCB*D@y65|e@~0490U6O2+d52huM(Ez8s25FGjrcy>{x3+yp+{NHkrF5Ud)4i+u zF4t#H?4Vw}LCKp)PF;Oo+O;`nCC3!sK%%M~4e9)IG))bMYd0@rUsc;lv0;3AM_hDT(h=))O4T zSVAM#Kf1aoQlR+!i+ChCO^z!I@M?h#;}Sv?pgI42Ym0cTo4XVV>^sP@XM}w^ zt_%@vMn#xfVDs&6JUqD?1R;K(Mu>Dn=ct4x;gw`~HDQqu(MkJ8__e7K+Q-r`;;Rvt z1azQ3yF7g3`4^rGU${&C(4JqNnWKdZuYkwE&@IaAkraNX2Xw9~MR*1D@3}A?7Gw`i zW^u3%ZxOGu5OFf~p@ZX8V={%JSHsWpUNq`wl6u%isCsUAx|iXV@@v9lU3|VDAp#PQj*XwJf{$6AxeO$1nro)`F=`&e zJsW}q>RgTlp?Zy;J``ukJ|wf{g% zNuigIW9K0ZBEr%Hb@46nTZp@KxlVty1+#6qfkL?r79A>V1EqFuVjC8D&+ZFFn{#TUzni86y_zfi&F4)#%lj)3-7%?|} z^w4lp&Iw^3%?Tmi?_i>BZnrmy31@r#_+G(k9U{w`QQ%_*zTDySgO*IF%aD>EP@>V& z1AYSX1ucX=4%+#}_o+xc@$|h$ZRMSqbmspQ#Zen_hIvVyr nfvq3)`Ae9PAwwi}gNPyMp;f$n*KY)0^#IY)RCQJA@pV@@8uj_-%I0qlY|_Q#%M@G8?&*RL1Q;g<1V~}Cg#4oM|OSiqRLd(0VPH! z50t%<9pxSXxv=Xfua|%nYpxxsm{4^UL$0GmK*CBs!YeNVyd%nJN$a*|Yqf)xwsz~Z z#uKRNgSQJu2C?XT;%PrG6P`sCQ%fD}lrdx5|=rM%KXU8K)y5HZ(;3gk*`0wBq8ccfoDZ85PeMS!+-gf)a? z2xElf2pb3^fG@_6E3;*p^BJpnDlX5qUdBr%@-0z5;jxl4rQ{`6r8CLJ4i~d1N{duQ z(X@K0M=J&+zQYTJ8RTgI`j%2k{(date}8o$7Mv*_v8tM0&B~Z%^4j#T7!zMmh@joF z=!5DLEJ^;qRAK;L!a;x$GlY#lt))er)d`QfOD2Si7QAAUVb2Ek`!5?EL%%6Gg@H^c zP@F|)Y)70#ID-cNcLR7;CGu&TE$V6N|Iv*&NR5~&G{fR4$S1n^LklC__|058Ku%m4rY diff --git a/server/__pycache__/exception_handler.cpython-310.pyc b/server/__pycache__/exception_handler.cpython-310.pyc deleted file mode 100644 index 8c31a5a3d82cf1c4b32922f33bddd3758916cd7b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1043 zcmZ`&J8u**5VpO~djusRN=N|d+C$<_P$LwCXb`$YNrO(H)v}(;?PXusK1iYE3e=ZB4pb)51U z8csM(NJ@v48lNRBwT4#dTZx@ILnn2IZn?G-FZG8$`VLs!1!rV)kNfZKVWUP|L>jz_ zNRzj=NwJF3VLna~M)!<5D8A)VM? zu*`4lka263aQiFc&PRr1-NDIcO31-LD7zJ)3I_S3aRqVV-Kq=^n`k~7zyEh7PuZYcO zBzmFJzju1Q6y`$;^KA7}y*Y-Yz>ZX%u1YdWm&wi%E~%HS{d;N7rwQE07hI8SlQx}a KZMtYJH~s>_3KoF? 
diff --git a/server/__pycache__/exceptions.cpython-310.pyc b/server/__pycache__/exceptions.cpython-310.pyc deleted file mode 100644 index 6723aa38e2010f5caf4b8560ded8eab64b0ddac4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 725 zcmZ`$%}&BV5Z>*N7L){t7dd=?nN6yiyNd zd;~msa%TC*Fv(2!`*vpMn`u%i6&c#k=R@ZUzuyEl%|g&XS7#XRSi~a!fiBs*>2t(J zWFj^-HX^IdTsy?fEi(}{&}TIalKDJhzJYI;9Yfb!zBRAR%k5MKPZDllLz!lAGR%u= z&WJb)4J=<8oyl*|XEefuPnbY`Z8qgimjg9*joebQ-_c;o;1IdrORtv z6m;c!mrj2{?cF3$53uP44psUXa6lBDcZHE++9=Ta$F+a`)_Z`X?Q&hEhpXU)ZcwHS gLQU7q3oFffR?Tm8UzO+1)qJJbG_)e|Wp-l!0h@-D`2YX_ diff --git a/server/audio_utils/__pycache__/__init__.cpython-310.pyc b/server/audio_utils/__pycache__/__init__.cpython-310.pyc index 67224a736bef5abef6e1a4b1413e0ba4fac7d978..8126a8b084329470dd9e028724fc2924694c63fa 100644 GIT binary patch delta 67 zcmZ3$xSWwYpO=@50SJyuZ<@$$qvNHYk)NBYpIDTbm#FWOpIn-onpaY+pOcuEUXq%e Vptd`Xx)pO=@50SJyuZ`#QHf>Fm;KO;XkRX?#PF)vZyB|o_|H#M)MSU)E*FTEr+ YIYYmsq*ynxAX7I#FRx;=1k+}A03Lf6+yDRo delta 63 zcmca4d|H@0pO=@50SG*HM{eYP!Kmt{pOK%Ns-IYpnptG1A5fW`kyw-)P?VpQnp{$> RUs6)6n^=&!S)OS#I{pO=@50SLZK44KGnqiUz0k)NBYpIDHZS!AdmP??;OSd<%3l%JKFTvDuG OQc|p&SdclfSRVitX%nmf diff --git a/server/helper/__pycache__/config.cpython-310.pyc b/server/helper/__pycache__/config.cpython-310.pyc index 6310b02805d1f06356d1bbe167c5d9387126442a..d3b0b83c89f902922433e75cd09c4125fdb54fb3 100644 GIT binary patch delta 70 zcmX@gbB2dIpO=@50SJyuZ`#N$$E4$|pOK%Ns-IYtn3t&UlAm0fo0?Zrte=yZmtK;Z YoS|P*QmmU;kg1!WmshdbkLf!z0PYnRWdHyG delta 63 zcmX@ZbCicWpO=@50SIPv#ct%5V^X!z&&bbB)lV!)%`7t152#GeNG!??D9X=DO)e?c RFDWV3O)SXV9Ln^a830Rx6psJ^ diff --git a/server/helper/__pycache__/singleton.cpython-310.pyc b/server/helper/__pycache__/singleton.cpython-310.pyc index 380d9881d67830405c5c49c6c3bb615fca440a97..ea7bd2a3f63a1518b7190368826bd9620d9c63e8 100644 GIT binary patch delta 70 zcmbQvI**k*pO=@50SJyuZ`#Nmz^LP RUs6)6n^=&!xt&p*5dce}6aoMM diff --git a/server/modules/__pycache__/__init__.cpython-310.pyc b/server/modules/__pycache__/__init__.cpython-310.pyc index 240a23498d5017e9b26e0b38b74b3c63967059fe..f32aafc10a4f62109054c578c5a8012004bc414b 100644 GIT binary patch delta 67 zcmbQkxR{YUpO=@50SJyuZ<@$$qvNKZk)NBYpIDTbm#FWOpIn-onpaY+pOcuEUXq%e Vp1-D` delta 62 zcmey)`hk@@pO=@50SLrvBQ|o2GO2p#XXNLm>L(VYW)>Oh2UI3!Bo^fc6y;~7CYKcJ Qmy{IiCKhB)_F-BL04tRf&j0`b diff --git a/server/utils/__pycache__/__init__.cpython-310.pyc b/server/utils/__pycache__/__init__.cpython-310.pyc index ebca37b9872b1861f21d28a02910d0fc9b1d95b9..0574a1c643ae5c698794546aa0b457d5dd63237a 100644 GIT binary patch delta 67 zcmbQmxR8-MpO=@50SJyuZ<@$$qvN8Vk)NBYpIDTbm#FWOpIn-onpaY+pOcuEUXq%e VptN%~=jG*M0D|Mvn>KQvV$^Zb&&bbB)lV!+%uCdF$xklLP0cGQ*3U`IOD{=H Y&d@I@Db`Ia$kffx%d6P@i;fj=jG*M0D>_KO;XkRX?#PF)vZyB|o_|H#M)MSU)E*FTEr+ YIYYmsq*ynxAX7I#FRxBe5tqpeR2pHMyi% RzoevCH?bgd^J~U>W&nqq725y+ diff --git a/server/workers/__pycache__/__init__.cpython-310.pyc b/server/workers/__pycache__/__init__.cpython-310.pyc index 51c59bfa3037208019a1e0200a7403e1c0a5235b..c64d158c965165fcd0496930c95bf89fff311da1 100644 GIT binary patch delta 67 zcmbQkxR{YUpO=@50SJyuZ<@$$qvNKZk)NBYpIDTbm#FWOpIn-onpaY+pOcuEUXq%e VppO=@50SJ1x$8F?}WKy-$&&bbB)lV!)%`7t152#GeNG!??D9X=DO)e?c RFDWV3O)SXVJb`Hf8vsg(6nX#v diff --git a/text/__pycache__/__init__.cpython-310.pyc b/text/__pycache__/__init__.cpython-310.pyc index bc4d39aad8465c13d7ce5f527f8241f8303f65b9..d48f6e32505a399c6a3108c5c8482d44df14e9b1 100644 GIT binary patch delta 70 zcmZ1}ut9)3pO=@50SJyuZ`#PM!l+}TpOK%Ns-IYtn3t&UlAm0fo0?Zrte=yZmtK;Z 
YoS|P*QmmU;kg1!Wmshblgz+jH0N|e&82|tP delta 63 zcmdlWuu_0KpO=@50SIc!A~tfXFshpBXXNLm>L(VYW)>Oh2UI3!Bo^fc6y;~7CYKcJ Rmy{IiCKhCFj%K{d1^_iJ6b=9Y diff --git a/text/__pycache__/__init__.cpython-39.pyc b/text/__pycache__/__init__.cpython-39.pyc deleted file mode 100644 index 2cadac7148588fbb24da388890fd858ed1edb606..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2126 zcmd5-PjBNy6rUM8iSw`9ZI?pD#afA#tlf=wPe@T!fwmlwp!~^liKr~sGfk||V>rI$=ZoC~FaZs=FprJU%5;^OK-M_d>$VCW;5Qc{qMy|3AlE?L0}`U9J} z1M-?E-1XWiS?^g%(~@e-u+#5fgjsIFT$p$|eI9)Prx1Vg8w{c_C;!5FTCmFc zH>Ro@%0XAEsks*0K%xy(!eClIs2U?=qtgRoM3&4zgbI4gmQLZEk&C0HYiQvflVuI! zRU6UwVbAQKV30H#KY?Jskoh-48STfuHY&|WzP$3$W8X;RiP>D_i9qhhXWCDsQbJE< z&T%VjF=OGca_6VH{l(M0mJiQ|YNY#?@~zDNS_d3VD#hDsR(n^#i1IiSy6@YMwF{K3 z*8Hn^o|shT8YQUt8`WTglJn8n%KB26xyt`E;*(Sx6qM#hi~xE6*dHq_U}Zb9`XltQ z>_(O65#TB1LA`V<_Lljjw~pbkwQ;sy9W9$H5h>rve7TRpQ1tpdB20H+BA&}y)PuJ} z)gkK2F;u3FSWl7}@fFh|5@+izijmxkbii)_NxsnZy%)3dH(Y zrHKtYn}^i?-{^X0bH}FGcVV+e!$@O~s<+^~b@9DrP8zZDH~D5WfDbYySiC==(^Q7Jlx>)R{d0A0?Vez z=b2Ek5f+&0ZD87>w@J5AvI;iLvC+ocqx4QP$>$Ssir1<}fT2kp=YSqK-hHp_HU9v5 C^#A|> delta 63 zcmdm?uu*|KpO=@50SHRVBQ|pHWl=TN&&bbB)lV!)%`7t152#GeNG!??D9X=DO)e?c RFDWV3O)SXV{DsAZ8vtB66yyK^ diff --git a/text/__pycache__/cleaners.cpython-39.pyc b/text/__pycache__/cleaners.cpython-39.pyc deleted file mode 100644 index d9c57c6ab67db62cb7abcd73f17a81f0c0010130..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4463 zcmcgvNpl;=6`lnfA#U0z(b_t4WDAi9*m71`QCX&yL{*BcnDQZ0a-ka2O<ADp$%gh1b*j_q~2ggU`)X9Xx;E zy|Vt#bB^;5s!Tp*R6fB^f8aWfZ~{lT!i(L&wWSw$w)6wvmW7~z(vOS1Vo-D)>F$<9 zVb2Z9vbn4V`q~N3h|}T>%Cq9ESVFlZ&WZCV&xvL64$AZ5 zg1CrsSzHpAQNAOt2!`^4_>s7Z@}hWG{21jWaZS93^0IheTt|6Dcxz6r#=r3#hpnr$ z$2!Jj`gWtyj!dWDs)uQ>@l4`7GHJu`Ff}Sos@2t4@)O2W4ltf9J@kB7W!DD8ko~%zS9rA>=B-1jMp^<{M zHd)5CwsRP8_4taAany^bGmf-j={gGsz*xe266gqwWLJl+`f9banMsv#g>EBNovo!k zIhr(Jb3bWgvCfLlWEibSVV(Uk?Y|!b-)0ttJdQWn3!Z>Y%KBQ~t}^yvon4iKSl>AQ8)oCT4$SSpNW(usTq@> zLc^{{2{w;33%Qmz?Recu`>|j=*3eWYxRD1K^=7Y?#u|pfhM#~%j9^1yho&or8N61n zeoKD-Xy46e`Xgd_H3I|Li?*cND#~P~ub*tqD%pd4cU^(2Q0sXDg z)7CZpw618yZ%Ner+*2|y!_c!RmU;ETZy$aB#r@TLYxiqj?$tlY-Ix20n#a)Z{?O

?q+L3;bck?2|pw^a2P=u^ zY(!8VMjF~SOk<)D!v+B>_ts5NfV-P1d+IVB6bv z_WT{^d12dGbN1awIQ01@Z?!-v8U#}{gPFOn`z`bp=$P`Nl5P1iKY_E1c_@1_G1d$Q zVu8k=kuNjE)#;@;dNd(&+*x+AuUx`ay2&5LpRLaKdN zG1L$5Ib%D?{oCl(w2KYmkZ+;r55%WyTiLuWxZUm!-?;|j#xEip1N<9##V}1^Zl($U;yR3? zbYd6s`Zw=#2EBt_b{qq`uiPE%3Cix)<4-B%vMkDw&=O=+%AjL?A}=9Fju>iopbkbc zETz;+Dfl3()N7vAydvJm@|d3v@|sv!+wdxG#Z@={CzFfEFfp*@2}U139&w>Oczjx$ z1dU`p%4c<)K56nT49-tZ$Gu~mu(pT`nXhBHfXz*OPn`=L@7M@`g9=$XT@ClVgR>bi zGX8*>DRhh5P8%0TQ4qz~z9=m_i0-9r7rfnV*iNnd-P#oI$y9z5$u5lh!rosdT%1{7 z)nHI(H`qjmLT!+p`ULlHuX5*!B3k=iUPMQf1;wPBwzSO|wQ^n_280Rak&EXi+i}|B zaq}Q&<}(N1WFh;W_2{57XqNi0d!0Ody}9)#Ow;7PjyDT~p?8T4=Vxenv#RM>4_u%N zlYu&+cHu*v1*=Z~K%M)=t!Z_x;zoc(ZLd?`U!4Ojss^wOR9CQ;mY+SMr)J2F`iRC^ zKl&+s5$A!hk#-S72-Dd5UZa0teSXD*N<8&TwEVwRqJECWYD2)w#{zERtlt9q+z2#= zO0LG(zd3^a_799b4a2u!zc9j%p}6NJ(Eb?z-VywFeqj9gAM+Oc%Om_4Dw8ncWAuMH zg8uGt=qXCu zUSoN=*~G=K+04syGe-~|+z{wqsy-p%L*z4UX&%~}d$16y$j~3Q=I|+qW}N^ delta 63 zcmcc5dV`fGpO=@50SIPaJ(9YSr-Vt>L_Z@xH&s8eAT_hdP(PqDIU}(sH=rm#D>b>K RSihvCSU0gCbMhJ{V*rB;6@dT% diff --git a/text/__pycache__/symbols.cpython-39.pyc b/text/__pycache__/symbols.cpython-39.pyc deleted file mode 100644 index e07c3383dc2d41c4d442b8ec31bd0c6d7e502dce..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 754 zcmYjPOHUI~6z;Tz79wul7&E#dB-)9ID>NzsDk@MdzGgF-&Rpn7J2Q6fXu(YZQ&CVU z4*_{Af`|`5P!t~tk2L4h;L6QxjN`v>qf^4db8^4$J0~aS+}wM_qM|~VV?#gI$t}0b z^*a~*ZyqN&?i?crvWs`g4A01J-Yt80kL=~WglS+%9&xuZyIc-?w%MCwKA+@|`{cX{ zhWE?)Aylx3wZZcjLUk7}UA}Ua7sHZF>Kh`; zwZ>>n)%5FegPJ#PCYqYfpH5pyLl<LmW=HK*kDW@}sct*fZ>I)|zs(f?T3WFfwx*=O z@(F^hNrGT`1znW+9fEGE5;aAtXe#d)qJ&ZsH=O*o5)@suyivuVmT&u~tUN`PNrLx> zf@S5ELRH0#D_-C{;1>J|axs;1DxspTALN2@EkPuz2{pALE^J7gp%WrSxVpL!7bm)A yaMdD}BI1EuBDN*D+yZdhvjTdP Date: Wed, 7 Aug 2024 13:06:51 +0200 Subject: [PATCH 06/19] Update Dockerfile --- Dockerfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b4348a0..bf2d034 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,6 @@ RUN apt-get update && apt-get install -y \ autoconf \ automake \ libtool \ - espeak \ pkg-config \ git \ wget \ From cc2fae9a6f01c623169c2ec7291e8b4184581ea6 Mon Sep 17 00:00:00 2001 From: PaulNdrei Date: Tue, 17 Sep 2024 15:39:50 +0200 Subject: [PATCH 07/19] Remove cache folders --- .../__pycache__/__init__.cpython-310.pyc | Bin 167 -> 0 bytes .../__pycache__/audio_utils.cpython-310.pyc | Bin 2514 -> 0 bytes .../helper/__pycache__/__init__.cpython-310.pyc | Bin 162 -> 0 bytes server/helper/__pycache__/config.cpython-310.pyc | Bin 1612 -> 0 bytes .../helper/__pycache__/singleton.cpython-310.pyc | Bin 670 -> 0 bytes .../modules/__pycache__/__init__.cpython-310.pyc | Bin 163 -> 0 bytes .../tts_request_model.cpython-310.pyc | Bin 759 -> 0 bytes .../utils/__pycache__/__init__.cpython-310.pyc | Bin 161 -> 0 bytes .../utils/__pycache__/argparse.cpython-310.pyc | Bin 776 -> 0 bytes server/utils/__pycache__/utils.cpython-310.pyc | Bin 1123 -> 0 bytes .../views/__pycache__/__init__.cpython-310.pyc | Bin 161 -> 0 bytes .../api/__pycache__/__init__.cpython-310.pyc | Bin 165 -> 0 bytes server/views/api/__pycache__/api.cpython-310.pyc | Bin 6193 -> 0 bytes .../health/__pycache__/__init__.cpython-310.pyc | Bin 1386 -> 0 bytes .../workers/__pycache__/__init__.cpython-310.pyc | Bin 163 -> 0 bytes .../workers/__pycache__/workers.cpython-310.pyc | Bin 2336 -> 0 bytes text/__pycache__/__init__.cpython-310.pyc | Bin 2096 -> 0 bytes text/__pycache__/cleaners.cpython-310.pyc | Bin 4152 -> 0 bytes text/__pycache__/symbols.cpython-310.pyc | Bin 735 -> 0 
bytes 19 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 server/audio_utils/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/audio_utils/__pycache__/audio_utils.cpython-310.pyc delete mode 100644 server/helper/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/helper/__pycache__/config.cpython-310.pyc delete mode 100644 server/helper/__pycache__/singleton.cpython-310.pyc delete mode 100644 server/modules/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/modules/__pycache__/tts_request_model.cpython-310.pyc delete mode 100644 server/utils/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/utils/__pycache__/argparse.cpython-310.pyc delete mode 100644 server/utils/__pycache__/utils.cpython-310.pyc delete mode 100644 server/views/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/views/api/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/views/api/__pycache__/api.cpython-310.pyc delete mode 100644 server/views/health/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/workers/__pycache__/__init__.cpython-310.pyc delete mode 100644 server/workers/__pycache__/workers.cpython-310.pyc delete mode 100644 text/__pycache__/__init__.cpython-310.pyc delete mode 100644 text/__pycache__/cleaners.cpython-310.pyc delete mode 100644 text/__pycache__/symbols.cpython-310.pyc diff --git a/server/audio_utils/__pycache__/__init__.cpython-310.pyc b/server/audio_utils/__pycache__/__init__.cpython-310.pyc deleted file mode 100644 index 8126a8b084329470dd9e028724fc2924694c63fa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 167 zcmd1j<>g`kg5%Pg(m?cM5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!HQenx(7s(xZo zVqT)YOMY@`Zfaghv3^csUV2Gta)y3MNwIEXL8fkgUS5TMacWUnYLR|oX-Z~(d}&E$ kPO*M`d}dx|NqoFsLFFwDo80`A(wtN~kR8QLK!Sw<018DZNB{r; diff --git a/server/audio_utils/__pycache__/audio_utils.cpython-310.pyc b/server/audio_utils/__pycache__/audio_utils.cpython-310.pyc deleted file mode 100644 index 22319df8a13b7b7e61627283de5a57a3714320aa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2514 zcmZWr&2J>d6|buPoSB{vd%chqt6&03G!fW}B80RegwH68v}Pj*k&HmCc30W%bx-$f zRkgh)H4r`={Y?tlWBj_cY0I~ro+h)^jMGwBi<36L!Wnf?{j~$!4uy9oJ=

[GIT binary patch data omitted: encoded binary contents of the deleted server/**/__pycache__ and text/__pycache__ *.pyc cache files listed in the "delete mode 100644" list above.]
zkulKH-w$>hsqKCQU8h;bsfI~%&~Vwm3> z&Hv$cnxjq{8z3_QMrH%7>Tvp#Hkgk2~-MdG^VLVMSc=eRATTnOn8G( zUJLO?bF@K=7`$l^nO9Vvk~no(Hz`lG&BdrDlZda#VnuV0b8|)c0pGqOE9sT5cB0Qt zOz&TrTUoXfBX(kxw+>bwtMXSLKXJ0=)af&4YtPl4zfgbi(&Z~xuLY!rpiDxIVdZ*L zB&urqjhI2ro44Z4E#^;07E;g+J*(5ui$m*K=!5>%85n@atMfPoLof(WU>HVV6vkj2 z??4(RU=oL68fL!Dz%v|y=kNkY;T61uIe3lZkbyTazt#zFAqxwuORxw_>q~3B@D2yz z1AK%pu#5?8h0oZA?bv}ccn|Mmr=7`Q7d(Y2eDLEgKEy|u!fx!rK1^djPUaH1)?8bz zJ>QY<%6I2e_z4$q9y6H5_xXwZRDR4(_S(smo$Rrb19oz-vk1 z)nbi*mWdTR58Q-56+&2x5#C4a{b-w+2Q^N Date: Fri, 20 Sep 2024 16:10:14 +0200 Subject: [PATCH 08/19] matxa migration --- .gitignore | 1 + Dockerfile.dev | 2 +- README.md | 30 +- charts/aina-tts-api/templates/deployment.yaml | 12 +- docker-compose-dev.yml | 5 +- main.py | 12 +- models/vits_ca/config.json | 275 ------------------ models/vits_ca/speaker_ids.json | 1 - models/vits_ca/speakers.pth | Bin 28463 -> 0 bytes server/__init__.py | 8 +- server/helper/config.py | 74 +---- server/modules/tts_request_model.py | 2 +- server/tests/base_test_case.py | 6 +- server/views/api/api.py | 90 +----- server/workers/workers.py | 38 --- 15 files changed, 38 insertions(+), 518 deletions(-) delete mode 100644 models/vits_ca/config.json delete mode 100644 models/vits_ca/speaker_ids.json delete mode 100644 models/vits_ca/speakers.pth diff --git a/.gitignore b/.gitignore index c2c98f0..b2e9595 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ models !models/matxa_onnx/spk_ids.json venv/ **/__pycache__/ +.env \ No newline at end of file diff --git a/Dockerfile.dev b/Dockerfile.dev index 2b5a7cd..d345f6e 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -32,4 +32,4 @@ COPY ./requirements.txt /app RUN python -m pip install --upgrade pip RUN python -m pip install --no-cache-dir -r requirements.txt -ENTRYPOINT python main.py --speech_speed ${SPEECH_SPEED} --mp_workers ${MP_WORKERS} --use_cuda ${USE_CUDA} --use_mp ${USE_MP} --show_details True --reload \ No newline at end of file +ENTRYPOINT python main.py --speech_speed ${SPEECH_SPEED} --use_cuda ${USE_CUDA} --show_details True --reload \ No newline at end of file diff --git a/README.md b/README.md index ee137c1..0297387 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ tts-api has three inference endpoints, two openapi ones (as can be seen via `/do The example for `/api/tts` can be found in `/docs`. The websocket request is contingent on the communication with the client, hence we provide an example client at the `/websocket-demo` endpoint. For the `api/tts` the call is as the following: ``` -curl --location --request POST 'http://localhost:8080/api/tts' --header 'Content-Type: application/json' --data-raw '{ +curl --location --request POST 'http://localhost:8000/api/tts' --header 'Content-Type: application/json' --data-raw '{ "voice": "f_cen_81", "type": "text", "text": "El Consell s’ha reunit avui per darrera vegada abans de les eleccions. Divendres vinent, tant el president com els consellers ja estaran en funcions. A l’ordre del dia d’avui tampoc no hi havia l’aprovació del requisit lingüístic, és a dir la normativa que ha de regular la capacitació lingüística dels aspirants a accedir a un lloc en la Funció Pública Valenciana.", @@ -73,7 +73,7 @@ To launch using lastest version available on the Dockerhub: ``` -docker run --shm-size=1gb -p 8080:8000 projecteaina/tts-api:latest +docker run -p 8000:8000 projecteaina/tts-api:latest ``` [Check out the documentation available on the Dockerhub](https://hub.docker.com/r/projecteaina/tts-api) @@ -87,9 +87,9 @@ docker build -t tts-api . 
To launch: ``` -docker run --shm-size=1gb -p 8080:8000 tts-api +docker run -p 8000:8000 tts-api ``` -The default entrypoint puts the web interface to `http://0.0.0.0:8080/`. +The default entrypoint puts the web interface to `http://0.0.0.0:8000/`. ## Develop in docker @@ -102,7 +102,14 @@ make dev ``` > [!NOTE] -> The model **best_model.pth** is requiered, you have to download by yourself. +> The model **best_model.onnx** is requiered, you have to download by yourself. + +Download the model from HuggingFace +https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matcha_multispeaker_cat_all_opset_15_10_steps.onnx + +Note: You will need a Huggingface account because the model privacity is setted to gated. + +Rename the onnx model to best_model.onnx and move it to /models/matxa_onnx folder ```bash wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P models/vits_ca/ ``` @@ -128,7 +135,7 @@ mv models/vits_ca/best_model_8khz.pth models/vits_ca/best_model.pth **NOTES:** - ssml format is not available yet. -- Currently, only "ca-es" language is supported, and will be applied by default +- Currently, only "ca-ba, ca-nw, ca-va" directions are supported, and will be applied by default **Successful Response:** @@ -151,10 +158,9 @@ POST /api/tts #### Command line deployment arguments | **Argument** | **Type** | **Default** | **Description** | |------------------------|----------|-----------------------------------------|-------------------------------------------------------------------------------| -| mp_workers | int | 2 | Number of CPUs used for multiprocessing. | | speech_speed | float | 1.0 | Change the speech speed. | -- mp_workers: the "mp_workers" argument specifies the number of separate processes used for inference. For example, if mp_workers is set to 2 and the input consists of 2 sentences, there will be a process assigned to each sentence, speeding up inference. + - The "speech_speed" argument refers to a parameter that adjusts the rate at which speech sounds in an audio output, with higher values resulting in faster speech, and lower values leading to slower speech. 
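Editor's illustrative note on the API usage documented in the README hunk above: the snippet below is a minimal Python client sketch for the `/api/tts` endpoint, equivalent to the curl example earlier in this patch. It is not part of the patch itself; the host and port (`localhost:8000`), the voice id (`f_cen_81`) and the field names (`voice`, `type`, `text`, `language`) are taken from the examples and request model in this patch series, while the output file name and timeout are assumptions.

```python
# Minimal sketch (not part of the patch): POST the /api/tts request shown in the
# curl example above and save the streamed WAV response to disk. Host, port,
# voice id and output file name are assumptions based on this README.
import requests

payload = {
    "voice": "f_cen_81",   # speaker id, as in the curl example above
    "type": "text",        # plain text input; ssml is not available yet
    "text": "Bon dia, això és una prova del servei de síntesi de veu.",
    "language": "ca-es",   # default; ca-ba, ca-nw and ca-va are the accent variants
}

response = requests.post("http://localhost:8000/api/tts", json=payload, timeout=120)
response.raise_for_status()

with open("output.wav", "wb") as f:
    f.write(response.content)  # the endpoint streams back audio/wav bytes
```

Note that speech speed is a deployment-time argument (`--speech_speed`, or the `SPEECH_SPEED` environment variable) rather than a per-request field, which is why it does not appear in the request payload.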
@@ -168,22 +174,14 @@ To deploy this project, you will need to add the following environment variables `SPEECH_SPEED` -`MP_WORKERS` - `USE_CUDA` -`USE_MP` - -`SHM_SIZE` Example of .env file ```bash SPEECH_SPEED=1.0 -MP_WORKERS=4 USE_CUDA=False -USE_MP=True -SHM_SIZE=2gb ``` diff --git a/charts/aina-tts-api/templates/deployment.yaml b/charts/aina-tts-api/templates/deployment.yaml index ad03f62..f06c616 100644 --- a/charts/aina-tts-api/templates/deployment.yaml +++ b/charts/aina-tts-api/templates/deployment.yaml @@ -14,11 +14,6 @@ spec: labels: component: tts-api spec: - volumes: - - name: dshm - emptyDir: - medium: Memory - sizeLimit: {{.Values.api.dshm_size | default "2Gi" | quote }} containers: - name: api image: {{.Values.api.image}}:{{.Values.api.tag}} @@ -27,9 +22,4 @@ spec: - containerPort: 8000 env: - name: "SPEECH_SPEED" - value: {{.Values.api.speech_speed | default "1.0" | quote }} - - name: "MP_WORKERS" - value: {{.Values.api.mp_workers | default "4" | quote }} - volumeMounts: - - mountPath: /dev/shm - name: dshm \ No newline at end of file + value: {{.Values.api.speech_speed | default "1.0" | quote }} \ No newline at end of file diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index bab2de2..7459165 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -7,12 +7,9 @@ services: container_name: fastapi-dev environment: - SPEECH_SPEED=${SPEECH_SPEED-1.0} - - MP_WORKERS=${MP_WORKERS-4} - - USE_MP=False restart: always volumes: - .:/app - ports: - - '8080:8000' + - '8000:8000' shm_size: ${SHM_SIZE-2gb} diff --git a/main.py b/main.py index fcca449..ae12d62 100644 --- a/main.py +++ b/main.py @@ -12,8 +12,8 @@ path_dir = os.path.dirname(Path(__file__)) # Set the relative paths for the default TTS model and its associated configuration models_path_rel = f'{path_dir}/models/matxa_onnx' -model_name = 'matcha_wavenext.onnx' -vocoder_name = 'matcha_wavenext.onnx' +model_name = 'best_model.onnx' +vocoder_name = 'best_model.onnx' spk_ids_file = 'spk_ids.json' model_ca = os.path.join(models_path_rel, model_name) @@ -44,14 +44,8 @@ def convert_boolean(x): default=True) parser.add_argument("--port", type=int, default=8000, help="port to listen on.") parser.add_argument("--host", type=str, default="0.0.0.0", help="host ip to listen.") - parser.add_argument("--use_mp", type=convert_boolean, default=False, nargs='?', const=True, - help="true to use Python multi-processing.") - parser.add_argument("--use_mth", type=convert_boolean, default=True, nargs='?', const=True, - help="true to use Python multi-threading.") parser.add_argument("--use_cuda", type=convert_boolean, default=False, nargs='?', const=False, help="true to use CUDA.") - parser.add_argument("--mp_workers", action=MpWorkersAction, type=int, default=1, # mp.cpu_count() - nargs='?', const=1, help="number of CPUs used for multiprocessing") parser.add_argument("--debug", type=convert_boolean, default=False, help="true to enable Flask debug mode.") parser.add_argument("--show_details", type=convert_boolean, default=False, @@ -93,9 +87,7 @@ def convert_boolean(x): speaker_ids_path=speaker_ids_path, speech_speed=args.speech_speed, temperature=args.temperature, - mp_workers=args.mp_workers, use_cuda=args.use_cuda, - use_mp=args.use_mp, args=args ) diff --git a/models/vits_ca/config.json b/models/vits_ca/config.json deleted file mode 100644 index d0f8728..0000000 --- a/models/vits_ca/config.json +++ /dev/null @@ -1,275 +0,0 @@ -{ - "output_path": "/mnt/netapp1/Proxecto_NOS/bsc/tts/training_outputs/0.14.3", - "logger_uri": 
null, - "run_name": "8kHz_vits_V2_lr_diff_1_5_m4_hop_96_ignore_teo_reduced_finetuning", - "project_name": null, - "run_description": "", - "print_step": 25, - "plot_step": 100, - "model_param_stats": false, - "wandb_entity": null, - "dashboard_logger": "tensorboard", - "save_on_interrupt": true, - "log_model_step": 1000, - "save_step": 10000, - "save_n_checkpoints": 6, - "save_checkpoints": true, - "save_all_best": false, - "save_best_after": 10000, - "target_loss": null, - "print_eval": true, - "test_delay_epochs": -1, - "run_eval": true, - "run_eval_steps": null, - "distributed_backend": "nccl", - "distributed_url": "tcp://localhost:54321", - "mixed_precision": false, - "precision": "fp16", - "epochs": 550, - "batch_size": 64, - "eval_batch_size": 8, - "grad_clip": [ - 1000.0, - 1000.0 - ], - "scheduler_after_epoch": false, - "lr": 0.0007, - "optimizer": "AdamW", - "optimizer_params": { - "betas": [ - 0.8, - 0.99 - ], - "eps": 1e-09, - "weight_decay": 0.01 - }, - "lr_scheduler": "MultiStepLR", - "lr_scheduler_params": { - "gamma": 0.6, - "milestones": [ - 1000000 - ] - }, - "use_grad_scaler": false, - "allow_tf32": false, - "cudnn_enable": true, - "cudnn_deterministic": false, - "cudnn_benchmark": false, - "training_seed": 54321, - "model": "vits", - "num_loader_workers": 4, - "num_eval_loader_workers": 4, - "use_noise_augment": false, - "audio": { - "fft_size": 1024, - "sample_rate": 8000, - "win_length": 512, - "hop_length": 96, - "num_mels": 80, - "mel_fmin": 60, - "mel_fmax": null - }, - "use_phonemes": true, - "phonemizer": "espeak", - "phoneme_language": "ca", - "compute_input_seq_cache": false, - "text_cleaner": "basic_cleaners", - "enable_eos_bos_chars": false, - "test_sentences_file": "", - "phoneme_cache_path": "/mnt/netapp1/Proxecto_NOS/bsc/tts/training_outputs/0.14.3/phoneme_cache_festcat_google_10_01_2024", - "characters": { - "characters_class": "TTS.tts.models.vits.VitsCharacters", - "vocab_dict": null, - "pad": "_", - "eos": "", - "bos": "", - "blank": null, - "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", - "punctuations": ";:,.!?\u00a1\u00bf\u2014\u2026\"\u00ab\u00bb\u201c\u201d ", - "phonemes": "\u0251\u0250\u0252\u00e6\u0253\u0299\u03b2\u0254\u0255\u00e7\u0257\u0256\u00f0\u02a4\u0259\u0258\u025a\u025b\u025c\u025d\u025e\u025f\u0284\u0261\u0260\u0262\u029b\u0266\u0267\u0127\u0265\u029c\u0268\u026a\u029d\u026d\u026c\u026b\u026e\u029f\u0271\u026f\u0270\u014b\u0273\u0272\u0274\u00f8\u0275\u0278\u03b8\u0153\u0276\u0298\u0279\u027a\u027e\u027b\u0280\u0281\u027d\u0282\u0283\u0288\u02a7\u0289\u028a\u028b\u2c71\u028c\u0263\u0264\u028d\u03c7\u028e\u028f\u0291\u0290\u0292\u0294\u02a1\u0295\u02a2\u01c0\u01c1\u01c2\u01c3\u02c8\u02cc\u02d0\u02d1\u02bc\u02b4\u02b0\u02b1\u02b2\u02b7\u02e0\u02e4\u02de\u2193\u2191\u2192\u2197\u2198'\u0329'\u1d7b", - "is_unique": true, - "is_sorted": true - }, - "add_blank": true, - "batch_group_size": 0, - "loss_masking": null, - "min_audio_len": 8000, - "max_audio_len": 100000, - "min_text_len": 3, - "max_text_len": 175, - "compute_f0": false, - "compute_energy": false, - "compute_linear_spec": true, - "precompute_num_workers": 0, - "start_by_longest": false, - "shuffle": true, - "drop_last": true, - "datasets": [ - { - "formatter": "vctk_old", - "dataset_name": "vctk_old_mos_filter", - "path": "/mnt/netapp1/Proxecto_NOS/bsc/speech_data/fest_google_vctk_denoised_8khz", - "meta_file_train": "", - "ignored_speakers": [ - "teo" - ], - "language": "ca", - "phonemizer": "", - "meta_file_val": "", - "meta_file_attn_mask": "" - } - ], - 
"test_sentences": [ - [ - "Per exemple, dels nostres bancs que inverteixen en armament de les nostres empreses.", - "uri" - ], - [ - "Preguntin-se si aix\u00f2 era necessari.", - "ona" - ], - [ - "La suposada ocultaci\u00f3 dels informes que advertien de risc s\u00edsmic.", - "pau" - ], - [ - "\u00c9s de 633 milions d'euros quan es far\u00e0 la publicaci\u00f3 detallada.", - "eva" - ] - ], - "eval_split_max_size": null, - "eval_split_size": 0.01, - "use_speaker_weighted_sampler": false, - "speaker_weighted_sampler_alpha": 1.0, - "use_language_weighted_sampler": false, - "language_weighted_sampler_alpha": 1.0, - "use_length_weighted_sampler": false, - "length_weighted_sampler_alpha": 1.0, - "model_args": { - "num_chars": 179, - "out_channels": 513, - "spec_segment_size": 64, - "hidden_channels": 192, - "hidden_channels_ffn_text_encoder": 768, - "num_heads_text_encoder": 2, - "num_layers_text_encoder": 6, - "kernel_size_text_encoder": 3, - "dropout_p_text_encoder": 0.1, - "dropout_p_duration_predictor": 0.5, - "kernel_size_posterior_encoder": 5, - "dilation_rate_posterior_encoder": 2, - "num_layers_posterior_encoder": 8, - "kernel_size_flow": 5, - "dilation_rate_flow": 1, - "num_layers_flow": 4, - "resblock_type_decoder": "2", - "resblock_kernel_sizes_decoder": [ - 3, - 5, - 7 - ], - "resblock_dilation_sizes_decoder": [ - [ - 1, - 2 - ], - [ - 2, - 6 - ], - [ - 3, - 12 - ] - ], - "upsample_rates_decoder": [ - 8, - 4, - 3 - ], - "upsample_initial_channel_decoder": 256, - "upsample_kernel_sizes_decoder": [ - 16, - 8, - 5 - ], - "periods_multi_period_discriminator": [ - 2, - 3, - 5, - 7, - 11 - ], - "use_sdp": true, - "noise_scale": 1.0, - "inference_noise_scale": 0.667, - "length_scale": 1.0, - "noise_scale_dp": 1.0, - "inference_noise_scale_dp": 1.0, - "max_inference_len": null, - "init_discriminator": true, - "use_spectral_norm_disriminator": false, - "use_speaker_embedding": true, - "num_speakers": 257, - "speakers_file": "models/vits_ca/speakers.pth", - "d_vector_file": null, - "speaker_embedding_channels": 256, - "use_d_vector_file": false, - "d_vector_dim": 0, - "detach_dp_input": true, - "use_language_embedding": false, - "embedded_language_dim": 4, - "num_languages": 0, - "language_ids_file": null, - "use_speaker_encoder_as_loss": false, - "speaker_encoder_config_path": "", - "speaker_encoder_model_path": "", - "condition_dp_on_speaker": true, - "freeze_encoder": false, - "freeze_DP": false, - "freeze_PE": false, - "freeze_flow_decoder": false, - "freeze_waveform_decoder": false, - "encoder_sample_rate": null, - "interpolate_z": true, - "reinit_DP": false, - "reinit_text_encoder": false - }, - "lr_gen": 0.0002, - "lr_disc": 0.0002, - "lr_scheduler_gen": "MultiStepLR", - "lr_scheduler_gen_params": { - "gamma": 0.6, - "milestones": [ - 1000000 - ] - }, - "lr_scheduler_disc": "MultiStepLR", - "lr_scheduler_disc_params": { - "gamma": 0.6, - "milestones": [ - 1000000 - ] - }, - "kl_loss_alpha": 1.0, - "disc_loss_alpha": 1.0, - "gen_loss_alpha": 1.0, - "feat_loss_alpha": 1.0, - "mel_loss_alpha": 45.0, - "dur_loss_alpha": 1.0, - "speaker_encoder_loss_alpha": 1.0, - "return_wav": true, - "use_weighted_sampler": false, - "weighted_sampler_attrs": null, - "weighted_sampler_multipliers": null, - "r": 1, - "num_speakers": 257, - "use_speaker_embedding": true, - "speakers_file": "models/vits_ca/speakers.pth", - "speaker_embedding_channels": 256, - "language_ids_file": null, - "use_language_embedding": false, - "use_d_vector_file": false, - "d_vector_file": null, - "d_vector_dim": 0 -} diff --git 
a/models/vits_ca/speaker_ids.json b/models/vits_ca/speaker_ids.json deleted file mode 100644 index 0cdec47..0000000 --- a/models/vits_ca/speaker_ids.json +++ /dev/null @@ -1 +0,0 @@ -{"f_cen_095": "caf_09598", "f_cen_092": "caf_09204", "m_occ_072": "pol", "m_cen_pau": "pau", "m_occ_7b7": "cam_02992", "m_val_51c": "cam_04910", "f_cen_063": "caf_06311", "f_cen_051": "caf_05147"} \ No newline at end of file diff --git a/models/vits_ca/speakers.pth b/models/vits_ca/speakers.pth deleted file mode 100644 index 6fbeac1fd80d75ec7d2d9a245fcd745ff53a462d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 28463 zcmZ|Yb-Z?6SuNlXP{73Q?m%4~c49081I2cwqSQjb?(XjH?(XjH?(RN+pKCm8r@QaIhd%Va|L6an?Y`%@Z+_DH9*=qI{wmx2ny(AMKowtY1Y?L3c7 zGqi2H9^0_>KD4aLec!jWuhy}ww$b0z^}HNiIhV)SRmD-wMY-%v)s}Nx&vV-}#a6aM zGZsaCEVU0PwxKT02Tl*T7E%wMZy)Bxuj-b$-OH{Sj(*+e?bt19U52{tie)>FVX4Mt zX*^oj@5kB{MYs1$zt-JS*4;MeX{)&}s_xjjVyMQ>4zKgL9`jr;z120x(C>9u*A~2w z)p)e$gQf>w3u!x#SbNU0nfq-m_hT>Drr+wZ*ov+lkLB2owYOtcGw#FUuZv}_n{HkE zZfn%I=2Xg$iIAKT-6y6HjJ zLVAa6zmMVA+GDAT{a9?!UT@X5979o+jSndIqFycKIEr~MkG?&cb!dvNm~BVl*_yIw z=X%_hZJ&o?JnDAXJ;kz&)v|2$*3|8Mv|BZlmG!NS@349hsM_<%?H!MXj zBFn{jHrqaQbK5UV)$O&w>-@6bPR@3%b6NtYANsj0i*+5Ax-V?`J{O}?KKe>zbW3*} z%Tl+?=CA9jE__?Lt-BMgm%85Dd8?0p?A!Cf)6-uI8INvXq{`ZkTi1={SPtXt!IMLM zy$r{)jpNv@ecjFCt*U#G+igQL6#Lxt#kh^dFz;Kjjqw0aEqT1`Zln(8f_#XRyU8}k3hH=@Np=x#yTbKPX_kPp5_zgv6 z7Z;guK6HA>wUD)wbjm{6cU4)6Tw6ab&9E=~Hq6y3+a zxf*OpzqWf_HA>JZwrc6TirLQp&&}>v7JDJs#-*=SlUeZ3Yq?Z=S)b1^J@i`0HV?I( z^=*B(mx8rw2L+&3=*nd&{FbU(E8(*{_o7~!C7&@=9(vuDbvX9D>ic;Z+CzYj!&sL? zIqr71?8~<5Y}nlUo&7Kzoevw@YHzl-E*nLsUe0Hnp5a=^;W1lPpevi+UoDmID%N5h z$9nGPZjiW+(l%8^KH~7ui~1^S`n@ZPzU`WE8~m2GT=%}TjAI%4VO$;PAVZ6>oR=I$ zWmiRqV|A3x;7sL6eLmCljJJjO$-|D;bx|j{Y^KFZUEyDfg?4i`n!q5~n{L>Y;B8*y z^E@Y*gj2n#^zAsTRV9YXV=dQm9@Ouqu_5zTiOgLP*1&dW7n>7S#a!vL<%TSg;r53klRrnvya*;Av4$qWwl5|uZ7Ce7S6ue>cpD^ ztNlg4DjQ2DkZt-lEdKu(EV7W8+v+2lqOPj*S*B;c7Si}J^{?0!4HZRf_2alYmabAo z+tJ4Nvqz9ZyC2>K?>3aRd{=O)R+M)2-1>DzCr|1_`Lp1cd7uZpFq6tT7}#=YFeP1NYEqg}Qc{ZKT; zA)4ELnX9AH59Dn>_jSAb*=F&}6Pu#GE#}lA1*h*1`&&A%D!FTQb1PzdDb(se?a?)wj)EakH5)~-TUt+G+xBBq>t@CFR$3)` z>Tg-+y!QI=9~%DP_ljWAB`*do&2g{ec0Swmuxlanrdn=|7}>@$g>iQFU3p{&mcqXK z%wsOw#i^|0y6(kVS>?92vo^eS8p+mlw#PSlj2=8`tF>;18Cp<;+ipMnug;n5gYw;K zAlurF#|Yre)%onxvt0{OASJ7+<5aapF}aA>t#Y>Q;uw~;YK82!YRAQ5BgRe6Dqn)j zua{z7vv=dFt+Y+A)bIAA-%6EpO{pFhoms;4_EvC%R>G>bA*)}~Ea!7f&wjTMaHCo5 z!01d{KT)-d*6V+4t&@g-HXjT0DE5P2u6^yTU*5I*DxrNSxZ)i8b#_|f4T7-Rw5zpJ zf*Y*^&hx=*zZ{l2D~{VJ<_@T0J)d)Wj=P2I8d_g_jJj@MjC!wX_$hq?jA+NQZL4t| z3y5iJm0N`#WUOll2reAq8z4!1OY(NFc5zYk1F+aPN?F^Aa7Ql<=JtSl+FpbLcr930 zEl}h6T+?%23sLrua$SX%Cf3z@?O}Id#Kz|q4!a$OrdEbjZDo902O+jMi|>@0f&_9X z)r(ox>Z(=sYy?dSWr_`rTkuC#7<7Y59mD}bB z1dw2;7pT=a>ch*f325bAx3`*FTUR~MqQvx?vb>eW`%;VHVh#jze0qfX*|e=+b+kc7 z-K{<}^(~3VMGany>Hw0@=b4`SZXvQ8rWmRl0?|6*TNS$M+ob}iv;+TZZ`Te|=ye;d zVu0wi(q1zy=33wBhjz1hTI}8`Wbk*fTIoJWyrcC?ePx9yM5s^^yc!LBvJGo@KJWBA zcMI9JdbhewT-fAZ(eW)BZKqf6+qk!bL@H%!zb*$XIw+MRjNU5eI`&a(X}jPBFEvl- zSa^X()ligbQK4t|{U%vyvGDN=MhcpKK*7mrf4 zlHY%73<4Dfgt7gW3S!)V`E9kiI$$R-jzi(dnq=EC@6|AuTFvUKfPG@RYe1_45Vc~| zB0Qw_sN?zk)AQXHqPchArmXDhtya3h^@MGyQP1i9O&%v|pVsFg^dZkQ$S!wr#V4 zV^y#F2(DL}hR&dL4gM2C*UD#b_eOxA9QmxN01x7|Ok?aZA=sePY1dLEP)mD6X$8L% z*+^L1;5Is`%^y%AOsj{ytL1#b=>@KZG@#$I&IO*Loq`*(r`}41dL0Vou^(|A;SY*gHV|cAL`fdhh{Nv&cV~~!B|631u(uYmGahTR0!Ts1QNG<1M!aM z3r#P0EhMS7Itxd3TQTyxbiH3xmRJgO27s?4lIz?ZqEFX36#h2m{o1rPPo*tkGG^qj45HOe+B zt0Er=f34?;JRKR^?E8I%@NjASGKSH#+gLd}ao74}CDzku;eh#ezUcHK*Fu!uP}y3k zg1+)$O;c;%frhn)egI=WP*NYVu;~D$QQ~cg9q6o(fJ(=TChz!Gu!j;N&o*l=3guZ1(xwlXQBhFr)(W*hG#{;{ 
zg^|Ur{wR=<4;95ydU1fKb^U^nEo!e0w1aVebzdC9xg8K9fu(3mpnV6+aa9{tpD#YW z*xf?9QnN0V1!Z*xll6L)K~X~z>Rm(D(iVN#)9=!_kmytrWFPJXa+Eo}P1zv6ES+c9 z68x%Eb7wwFKn0>g^&9j-Unt=Qjj-uP3~AiYmzZAsZXvz6SZD=}4Buxc9HIk6oQi&d zCDdC48fepMP>O0K1lcJaQDPyW>I;H19Mvca@ye>uS+{^8+D<5#-`7cfy3=S)LkEqI zsK@9jkK=sF=_T$Kf~A%{%%VELf>;F1t8&<7Td2?L$+&3Kclw+lvESViK|&IbqF`U8 z(5>;}ZU|Q3qaCSLcu=*e0@n#flq5fZqlrSt5B31(3q0~t(@S0p0Wk2Oa>M_7jS9-q z52|v8r>q$!X*L-}gMj|3p(&yB9%d8Z+99B_froY*Bhr)x>?=e%yis?n$OWap6CcX! zl8jKEZIp1g`F3_bUwV3}YayPG7^P?szBC*nk+SSlVRQ*72M)^J;@1sWK&1hZ_(%$s z((9Mc5WMz>p~@HwmqQqwO({?sV%m(OVN~|G29A*C5g)CFC=L|ZxV0ivCWT3XDub$wN~8l1dY%g}Pj9I7IpkR<(g zueOvU(9)&&uPffSAVT(4n^fK2%QjZQ&5y%A6B9BC#&Qr}gZ3PqHOjUzb z)=hH&h_sl%aXOV05U)iY)hjw0;-pDNC zl3exCS?LylL8YK7tXf8yNUkYl7!D$(J?zDNzWj6vzO}rzJjb>RtUS|^!l*iVP^E7x zJnXKV9Llrus%t1Qg)%{=f#V$H>~9`1t(fqXtVQO;NoFaI3j51H6V?z4nujq;`9pFLY!Zb&zjpdhz|2_ zA+-Vb-1Zh(E+M_6E0ht$C=sNDDEPE8gdH(fw;&Srexy)Qv)~ly&AzG7x@N7tw0dPf zVl!w6_M}1zOdlqc+VkPlD_#p}?eN%;-=o5eH{EuY$x7pf(JH4^is=e{i3^$0w0bPP zHwUO~(cY;*6smAk$q#eW?*O#a`Sn;SY}68BWj&UwnDrA*bT5T1RJ|?dD@_l-TZqTN zwy~%#j!cAt3(9kKzVh@+*Fpf8F)|CBU`JIjN~oSn?B7 zD_;xg$yvGso)XnjsS%S0CBIN_A*pKVmtg$TF9HCt?k-C9KGSZgW>JVqvm?!(K1^GW z;6?ay#uQ>|gcE_Q7IJtPT7f#{bJp%C$=9ghSxm2Tw~%`CY(Qzg18axqvC{>Y6!KxU z5=MQLy4D0`QQHB1i##Y`;*%7Fh-$yEpU0PX$#zI3WWA>b*ifCQEDrn#_@s0zljNa8 z(cg-*oQk`JD9&NLX`)~!{f+KTk1{@x1z1Wqz@cmHVFxS#zY}J3Fmy#bG}}?FFi3L? zGNhI7q=V?Y0JYT7W^M&`Atz9FOlklQ0k)A~{1wfQSR!0hs4rPo!M@8x= z!n4Q-;SOe1^#;9>(jtD1E=qi>nPqR$p3PKW3(-dR;BW_5z!RFb z+D5L|(#3#O*2pCAYEl5aX?=QO)EsGH3$%2VO!TShM{Dn(C8R_D))nDeqKz7zA(IZ= zB7ogGE4E_ZUo?>5*-p*1kU%$>U3MbWi52;`Kt-LBugT$~!6_bCZ1@5|n>q);8eTO= zi4^O&E3S+b``iAADvxU2QULnUVWQ#?B^5C(0)_3wEYawK%esA`~k2b6WYb_DO(k0MD0BMGZH0BvK#g|5ZIx>)g zl%(YWN<#ZcytGFu&7IJJ#=OEoVB4CO5ty{|D}wTlv<}B)SL)t zl#IZW&)8&&@(vv7+@rFAF-;6hq12ubjy!7uh~p}U998LQR$ zQgj`}9#e;ak)uOOO7viclJeW0uQt8v-9iokBAE|}R%!q;=?gY>w+aF{9m0=mGY%Wj z^OwfYBi5pgR~ChLb_$po7-dl=RDiQM(6dccDbos^3JWU$m1-V@POZV%DkFPx+WG3! 
zt6d9Gat()xWIZ`-q{s^j!ib~xEa0HL^Ze25%qj$8ZyJSo!jI`86?7X}4?$ESP;YX{ z5Q$__Wf5F*Rzw4!JdsotLIVZyccQ8%z%J)&Os{?|1iRjV3OS=&>UZ#i9$VRAr$q9H z5ce&`kW?(-&1!)PT9x5}t}V$#DB;a|4HDHI8LXu^KI}fvdZ+7!AZW)7%7}Vauj^N*Jmq$$)Ir%U@?5c&qD014;t} zG0hN+(lDtEjHJPFp%NnI|<%E#(b~yMBDLY?ldd+JgK@rLh94b?2yAmIa z^#Rs^!;Q1hP}op!>I#~T8w>xXu-Rl1!T=GX(#$|_N{|TFM8K$}LYu=JCA1a^Ng37A z;etaNJE9+`hdA)G~-sexsJ$qK2tmI=-Vs6t<`I{IFr5xg79%Q7#T zJ8^Dy4k1taHkwJ!Bi-QdcIkZG>28Y2NJnO_#wnSgsA$9EAoYpM;_E}d_wu@&_%kZ+0h6lv8Nt72I&pM2z=pofC$brzaS+@Y7Y)Bap-&pB3kl7r8UjVc!l5!mprISRhE{yZ7 zhQerQY(!*eqElv03H3VH4x#ioHk0fix=GWZ0;czf=?!iRLF)LV95i+cV-ZPhne-o7 z7T_P#l8il|sVGam=pz5yL1vg;#Y8OhDWKm3nqdS1Q^|ooBC!PfG)Wo}1N;DNn zLC9pbk!ju@9Iw&~W78Q%6B@4(fgoIz40SML10qdlYH0gwi9o1%qv;K=g`^CsMVgEN zBKsSkharNkej-GPd5Z}~!Hk(ptIL#EzgI6wF$2`M1Ap)ayaSeIsJE&v9y*l1)(#CO zMOJ84wQK49D|d1k-*!-(-5@Ya!a5 z%7dutrlL}`ajf`6%i3-h(@=hCoAsFJXf;LisF3;?ALyannc!bLvua26{!Wi49edn+7AAna9pI zou=DD3?(zovjcz`00Zr_B%!jv6{H1261Pan+()4;1mVbXSTa~ZhxUNZkx2kV#W#Ui zS_QRcu$8u7m1H?l(>T8}7cgz11*00srP3*y&o`Ui^jZkzP<3&Bx;BxOOidriSWr(T zrV@!HFMX3=jk;#41+=Ii8err&aX90%;h#hR!&XSl%%6gR{27M|{&@Nvi9QZ*foqwx zCDmi;TDtSir#HJ6;t-$@b}_0Yz>#?u;1&Ex>@kE#V-xjINhDLWJ31zG=yOzbHH}K#q{RaLQD!wlJkq@yZBQj zh@zTA;HadJOGn=_xL3OIa*4D9DL`^2u2`Fck0Q`ASw!z5T@d~W`Qe{11V2i}F&AH2 zg#n5ZLj^Ulv@ylDosXQ};#x@Sn~0Pq4U}V0Khi9XQq9Du!89XeyMi4V^WVmtIIWVQ zH#rE#JZBjdnpQ-LH4n5GH_uPp3Z!%sTK$KsBO~VxisWZ9&@h$6b;I=Da(d*oklQ2c z)Q|^>(leUBB#q#?LH+>eo-la=)(;Yh?C%IjP$np_D)6E*DKO|aET(fCQIee1S;~-y z7Yp$gV(l3lf};#KQN)RLfOdPn)%2FvLYSkZ9x2{>&4bK-C}K*2QVR@)*Q*g!4pt|8 zfHt6D3Of@=C07--SpYpZm~sauwKSR)j#uy~R;G3>EK3wY4k$F>rz`*$f_G)vv7K){ zz16jllw%z{VzorzlhHZqF?vJzf>I8HbETz6nd{Jv41pC>jYHm2v6Zbzfez7gzye}N zo~gNNX!Nuwwi-T&fbH93&^s>1;n1ED_493}x4v5lVq+peLE~_SCvX(yz%1Rt8{%b# zrW!?c1e!UGs5}rdZwyMVS3{}PGh;~9l6o0$9Ic59P3Ml5LGgAPI10Utq|EK4Z0g*) z3jAvT{ix|}u7!~8{G9PQrO#+|ar_2TU>O?hBb@1=G1Y*n>}ZObmcxF9`BQiBc85n1GLFRLl!E4b+v!o)Ldf&5E(irq0+u>6 z*8}yIT}xQY{tW0btk4@e5|is(P(L*^3CLnDO{gE@uFE#fo*7NPBM_&u^8>uN7b$VD^&E1SNH_#b~K^f5XfjEzY z^&$|oI!(N*sTSEX4g{Y#$fJLqVEU22n z(K}3Ue_MzeVd=`Yzc;f55M!GQltM7EPj3X;an>U6MZG0~{AwG??)bL{|Ejucr->3~%I> z0pd2+p6m0Srgyv+!j%V$MTX>8%eWA{)NUCd(jS$=MH(LTR&|v3gshw}2%*-{DFz0F zK^A4-MMe7!&yI`|x2Tkc-HgaY+U<)pBdDVxUK`9K zekliLytI*oQ#d~$GUE}b5NQatgz)uaDTmdd-?7;l>8UurOBNvO<@v7DyWB0rRzZ@1 zG7u_Sk18>6#=NU{kiBA>)@wMJ5d#B$S6D{;*SBZ-NK?wK30q>f6bAAn%|ee>Qv3@* zVF;m#=H395MktZD_-toD+_ zxeN+!C49vRN*|M)xIwvHk+h5(H4u(PSdivUVrB6_Ratlxb3ns<{s5Oe2&B)0xEmbJ z)4N>@$)^(4$v9$-Ln_X15KimO969bY`yF$D4|ctS?BJW@Dz-YBh0s=hV;8cmov|`W z9lI3%CSkrE+$0qP5E0rHMKcF|3?yJrQ9`$KndZBNfX?Q-khwGRbg3#DB#1%1i0Z~o zoC`7R7{SLT@XHZP5s;+s06@IQxR6zgEUKVNAe|{Q@QaSmhe5rs)am- z5|r(^PRr94k_$5cs-A(8%2J(!JS3{M2FVEM=@}+fYO6BFPDLrnEh2rG*p1r}`dPEk z7dn!f86yv8v67@Pn||@QI#f^(L!yQ&&$kO6IP^@b0w)jM&Uqn8hXM& z`NRJie0{voNr+G=`DqO!gex~MQQi;;>XA9xnwuMJnrz{A*V869oYZCn0Z!2;L44p# zXk!^Zz?VjWb?(!4EhJ9QU^ruWU_0uYPXa4EuL|y9ISqK9W?BWjth0ciC7l<(HL)4! 
z5jdI=y+T3`)|X8+)FIKSWfC`#<~_6zTH%Zlzxo8?>GMf@$9f*qezy>FQ^hknkShxf z;|P-k>;$7ZEz=&Ec=I92z3 z-Bu`F9K*6`sSFp8BNwvdKR~m-%YPbf#MxNcfRNJ znA<|e z&$}5FdnJ#QKf5MaXw@2sB@xd65$ptj(@0D;T^M|?={>K7#0=mYrK{}1++{?r_z|Td ziGX@YD(Ow9bAqrJc@qML+%sF#T!==}!3a859pifgUzG@w_8B_1ZxNjaQS{gRFReET z9v2R(w`}KQr}w%R0@Ro#%5DVB?ZDtmKtL1*-3c2|l^(!KmkDN)ugIW|AR_8j3QkMr zOydA96@(H@^JQ$14h_f#)%xfVBXdRfKGkO`Cy1xCjs%3 z19Vjq1^tagB{t0Jlu|4k9fFmKrEnoo5sqb?PoxXhk4t^#bV4R9#=|Y=6Q;*s3z79w zG6E(&j4u+3GaE2aKvvOqk%8v8aS#RDuBAC^)Rs#&w6*+Sf^@kV65(SFRXL4_sI2O| zG8zN$3u5Dmq#x6wKFLc@41pleCr(eeTL{}GLaGF%W|D+hXkwhDo9RMvKV{UEfuxNM z9^yS#5p9vZk>CPb^^0QZD`+`Y3s04OK|+{IW({z3#A12xSz=yppn))>qCU^*iPu7; zlqRHx$$q;69vS>+q~StF7%@tR850g!DqL}bI*AoDxy&Qj2M;|SUzKPebruXl#4{2? z13iFjWIIFSq_N2@L{028BR4aJfVb{^(sW)60iq-f^ue`4T*R`N`s{-18csn96jsqTnH{4HQ3MQn82Uj{xH8y0^S2}+#imX~OxVH-{k?CFph0%y zAK?`+S$2BH8Yp@yFKvw^Mb63P)0h#35{WD2kGSaCs)?KsvQY*+2Ctenzo&5krbj*v zw{!tUa^;!Nr%X@2EkuNh4-`C?gmQBz@-d)c(0qfex<$r`D7n%tl?_hld@vK16|pNxOx<7kc&2{VE{&iJ?TX8pk@N= zXsmi~ly$o3yz+ z=oH~c3Wlb@c7~tnvg&VKxcTT@kX9|QGcpNG4jh}V>!RGI}h&|IAbY(?@z!?=`wyAh!X)6XKBkAjK+cWaIdVR1Go2!C-;{`^Jy$(D{I* z^mxAi^nP~>(W7pH3|A+(jIu&-rXq&ij5}oBJw7R|Ezr~{5XLM${1%nUbt~Exk#<+F zQi!K1rGoKSY0f@?DhfNkY+VcMYi80|?lgJKGv zaPzpiCN&HEu{u9+`haU8JjyZCR58R6c<588Uh0u_q6orm2qnQn8tO)nsqsUodM@S) zZKw`Z!e(?f!j9Gg)kjL@4~doi8ZzN-ln(AkYo!73XNU+gVqcygG=1Q;5Ij^IK>aCv zhD%TaP!of*P=)w(d+s4W!;_;QnmOd|1NBG_lk&36)^L({E6p`m*MMknBGzbi0%qr) zf<#@HCes<{E+E` zuZ7qxlYSelM-%Bx=RKcojWLuzK99$xK$>8~Lz)z}+=VuHi<)7srTw@XDV+=LG7>u>f$-oSNO-3G>%@# zH`v3!Z(FAGd9{W0xvSzaiSbd$^jYdHRWK<-V^sf^GVVmlnXtL|inYO2@mt=Lkn++G9V70_+*sk4lusIg*inIG z>xOsa%11hhaIMfIRExPdmO2^_#}1sRgWp6CP%q^Oj@-uc!>13sTL>C?6N*_!@+t_& zmvcD}gn-Bhu_nApH|I5F5k*P!nG=!o?2>L2e!5 zfNsqxfu-9!nNeW__O4P+uWamc*fstBOVg=9BsqdX+3`vivjTt8v7P|OIzfdb^QFfGvI{K)Aeu7w!d zK@n-as;X_(6)DW930=ZajO4*xF1(XO^1`Yco)D5QB#WwvI0GIlMFisRCdNr^p^NkC z#ps%9o?J3DAQ&KEmWh1icBPrnb5p%Qx$ulWK>G}jx#w}O7 z(?5Na;^KJlWGc4IjRg!dhWu1FtLAdEP2BM8Qv_R8331YKNvdcEfXVKMBC49Xab%SC z(bGp=3kiQT3K6bMD_E>|N!V$=*Lc3!HG%-#DKRRjwG8#zw9mYsGGVJs-duju<-_W7 z&&J$@*NvktXi3TTtsF9hD;9nik#o=Tc2m&!joAqBKR;&r=xZTLRxCC?Kdy7Vi~TgO z1i(>rOdQniyK1ElPY| zM*)ttJU(39%uK}1#gCmn=2{3G;~pEgjq(j&#hvB%xwn@W56F%v>^hM?2v9j3_==jN zPPoJ7_q)~+e>M>(7s!Z~$TN8t-XM{3pcwOeV~CekkXmVf*;ug1_=mM6Zn5*@rjNZ_ z2#O(0gQBSA5&&M+;7nnBH;794SIH4f<&nNl5H=`{i%;7DI?Vv}RJg^gI}%P5q3{Hu zK}s>&NA=<#Cju)s^mdahod#qwU({XCkDos7T8QER`No(?2M^M~3nGxZ+Ywd93>oKT zq)5vJEyShKE78w(Q%nVu%P-11T@q!am{6Gv)+H3*BGuNA8qdQYf|UUW9#2=#D5&mn z5^|dJ6Q+;9TL>4M>4U)v2$nmk*s(KmVyt?8fY88MO_R4*qR5Q~brC$2(6m(XLM6mM zTxswgBNM(10yRk9AQ%AThc!hgr=DO%z&VBiS$&fMhxFB+pE!NO-9iuz(a8jnWgGzwp3)k**fP)5Em07Ol=)LGLFI-c zO-Sii>T`_>gH!#A^OL4eycWVfB%pH*jY{p#Hj&N~$Rqs=al77~#ptb_;_#VGm{mYZ z26eP(-~?16i|YC$LjV)?#-{)&?<7+9zqZxaE=|$(zOb$V2F#%rO8W-dURqRL7>&TGN*<1&rh8`<+czPd^in4`mt<67ifTs zs3YETWpg|lw*)e1zL3ip2%nu(3k8TlfZfQLUPVTbSdsjsy;c>V8!5m!;wl39!d;?> zO^gD5M*^21M?^8FJU?yv)VqZMRFUwJ8&uR8-f#c~4VF|}ZxWA2#>*3Z;byG}V?Z)g zK6OJSvKhAA@E*?=SMZ(*zneCYk@%6qlyPdcAl5k`4-ghiwvE`u`RUWAT?+|5G7#}l z${Ko&xy1i!>=GssSs6fL&FiQqxPDA>C^8PV%wNjSx&su%zx#!IYB*lTWk^QAN^AW$Qzro zb1Ee10yD-G*lcpC+f-J0CP(MGzrc<$TL}jts9a&ms= z^cmMe*hOF%k|fuJLSqDl_k+f<|_}&G0cfN{b!MF#*%|v50Tsy!aZ#q=X0jdz7`TcV+3>1MHh+B z7{()<#L=l2l5Rzne(S9%{)XHYAw|*31$a%L#;(=p#Wez)=R!cTwUEHH17ez*hSW%G zreRKT(tdHS$(FiSfRy#_=S20YDuaYnZx?{+h4;Wa-$Z~EM8 zA*!k=i;<}733`KCZdi;mAdG4)iID#`u^QX_dTL|+KxjPe5`xw`Vw8?m%^NhBY z`+lBCLF45zHH(KOFVP4iq%`ERWps2G1q_|ss=Q%+^w~&<5SACn-0Y9iLDWbbXaXc7 zuWRMX{dj)C^!bl|27SlTzN=Bh{rz7NX3hDH(>L7n6}x(V)AWt^e1%=({O0MK?)eIQbbib9&39jU+`DQu 
z?Rb9c^ey*%g%juew&`2%{|dcsJimSVwtK#!I-TD!ef#}i@#yXOozr*R^A#hm^Sh?+ zy#Fi7kn_8z@4DwJLiGHe>AUaw3g^-Jz0>#H{}p2Bcz)mXz4v^DNPd3*^nLey1^hpM zVEX?1zas66^9QFNxc@6=vCa8I(+}SB6#~Nf!_yDl`<1Rge`NaMD z6e&SHqeRgrDFr-HDRFrPn)1aX&4o7~=Z{Z6b}hubfmDeFz45HB{}%JPKfrDHl?V zRU|cEq2|HQ^&tx}xS&!Qr+Bjf%{#_<1syWPmOU7_z6|G2PCs!iB#bm`;fYtgMG8dbZ%`dvU#$m_Z7zr+WBf7i>!7+KMf4BY+g2g- z-o!%tLTU(E97ZIG_sGX*t$aoi6_o5~@L751s?zT9NnjAD3Tx{o0Pd{}n1d1T7 zz~^~#NobH~p0D-fm13cDye{NH^vb?6I@@+A7)H(&S|bIh5;!)v^U?tUZU}0`bh{*y zw*~2;Y_H})gg+8l;3{awvypKu2Hl@OGyU{!A&kOAQb8&0*!H{?MVOLb_|`L)bIUsO z8n#;H2KS~)VKu_v#^3@mALVYInI1(eS{w=$Mzv?odd!XM+YA(s1M&WKJB0SwTcnI7yK)3Y~^oj*7I?6r_w6lMoP`II1& z?OZ=5)W^yY#g{5!FDYamN8}NUC@fG*P&U^WZcBMn*RYS=Y*k6{S$bjt{9rar8}ZGs zeHrmEkLTu}xieP_cntom=g&_+cP&JYrB0J6B$z=R4a0AD-_;%sWT;G#IR(}MYYmDeQ0V%IDFi^N_@d;+`3uv} zUkiz(Zz@C9Gm186jJue=%LK|^0kP-GuII;PeUCmvG!Y-pPZ?Bua5K{gYu1lc5_Tap z&L^p<#Bd^UUQZP=M0;mKAQ~9e#4tz|Af)Z*FHXO3ErjwQRHGzAz^X%pABHJ=15k?# zXj5pQDBOgK3nhU|k|~1<1S92!B}{5FBrx`+=R)Mc>Fiqa(9oVvqr_?Rst;W#?;9kj zd4b3+de2{)e(`Q0^4XAH9f@*+&QaFP@^bw#{(-2(Zq??MpW=sEz(q1nf05vDzC_^#`Bk_U%FdJ6fb&IE>|lI zCVo-h5bIp-iLQyA=;ag^WDhS;j0_s&8TTEzR(cS1>Hppa0=4C;2TPFOYXltk@41WExmNfPEuW_;lG z(fwVsHhl>5RRBrfxs8cr<{d=2-AHU@4s+VxznvHMvI@p*nw!Q5kV_3(OTQT~$H>F&kzc&5q-9mEP!p~vu8V)0fM$Af; z7pCCIZYyy`ldc`F2@yHY8+Y>BG0f03$Z_J->|hRwO-K21^~_SG7`dMR5z%%ww|A&y z;Fx!@W+QS3#XeHT&tIQ@?OF&@V>Cu*scvY+%fua}agVzmrln~@@Opf*7rC_ zQ%)0+Ov(`+$05KKeVHSeHERjudz>03sGun!ZpRr&n`Y5vZ_<$bSD??|n120kAvV&a z5pku&=WU1XVTgR+9+h5h3d=ZW3|5gcmqQ{{p|kWZnaxndP6w?a<35CzU<17DOd&`2 zT_bW%ZdMtFf@ZFs5Z)9PmxHf2M%MNDo6~RHEhHI}VWYGqbQw^vKxQUt&SgZ!`RLXJ zCa-Z)_RX9Pp6TA4Ah~8mS&s3Ku16K;_gBkpC`pi8o#Rh>lYt-vDu?x|F9uD*+NfNo zVR2l}-BxJ0B@)8Rm_1ml4avfTAFlcqHLQ~~NP`;P}NyRCALf5z((8+5z zNiS*<44_9r4t_&!vapB zg(#$gnKQ*pW|A3oAv44Ccc$OI79xD941l;x9%10HhQ&g*TB{=JVy;UWR7*W3#=1N# zDM}R;qM2qvMPwY^jBghxBzGGyxZo$NG5x4eQakrQsj5^_Q-3wY?MW;~<=f8Roqp$9 zNQTmsB=dk6C%BIwE#qCA&96VYY{U@@T)9wJ-|D1H%h0yIci zX=k}Lzy|1FuJ-tTQJ6Z-x+to^&QzgXazB4>`rT_G$+L{&v?hQj13F4(T)p(?$Q39G zFp$x!3arHCjqCxVhTXI`x1H&b(2t92Dphnt5?3mo=@#+W|FF!kBsbV;D5l>j3|?|z z4h=2O=kHIycefC)5Hu-{7zIBF71<21DvJ`|2HOVruj7<*Q&z1JIaJzbv*TEPYF^^HfxOJ4Dh|3;3uWpsc=H&##KbU_1ZXwYa^KN8ZkR5@Q zC*_nyMvubYh-|=nZtZaL`yg&l14}SLS+pg7E;j&kBZ9r_HsA^S@Hz!^T0JRE!0!e| zI9lLKyeS$LF;?8`r~Ubd(;r+5aT#3nt1n3H?d`la!#U>`1Xu_=n1Py%7%E{+-by1C z^7=}^j+!rJwPvC^;74yZ7|pe%RBA*yZ<@pfOmS}FJ3!=(Q_4399_zDDxtehP(e#Jc zLNcKXi`K;4Qpuo;;h%#K(#v%!=Lr2vPgmGk2yGejo7arQis%&LS7!{1QQjy8I3wf> zCu_``HN}G9U><;oB@A4nY3Dk2bYSIGQ#k*4`lGvrn4B1pf~jWu>6sW@8nOMrae#Hm zihgT%ayJ=Ekb2Cz8T}eII>)#WcnrX!#sGzbdrd5LFRc^ z{&N1w^vBmi;DGphBqd7B7L!*DtBSn4B!pWjH$mn(RB4?Plx+GC0l~DDU(3)#jboz8 zB;G@!AP!RCfX2}bQZ2O(3@r+=ZWvz)6-JeTo)w_;Pp3b*4XXCXKIZ(h=})geKXpF; zeEPH70y(yGxjPqV@;;*s2~+_+U&Oo3880tFgol~3>3s3+N+eAw*I*!JV^Itvy?-VdfWYM!7vh1)Ch{$l#`Yaw|<1LKD*E;jw^C zL-|6?^*S#aGZ_kNHvW!bjVsl+Dax==-T|TfpwbwZzr2lj=<9m0+9=@wTrU{Y3(YWf zS#A@MID;SjHTIFeoc`i&A-R~E)JJ_Gu-t{V5xPO<2T(Xg$@otA16?;MJDk${8&&(* z|TA( z!t3*Pue|<9;DV({u#fQ9`Pb85-7O?e<8*HNwa3;!y#^;J7MIF3Pzc2va#(f@ zVx(wBoWfa9!b3#DA67b2;!~kaULTw&LMLQzRKJEg^aokL(jjR{^o8WprRLbs1dAi00M|=C*W*O z3EOPP%@lkYPRWaJiGI$7-r(`+Pp%FVI{66Pj?=arf<%fe#ca;;ricRh?oiQg^2QMp zGS3IDK<8+f(>ecc`rB(EVo0Yjrwo5Yt-!L}XyUtxXT@zOtj~`EC=^CG$PmPyQnd31 zRiEqU@JGBJ?R2wH;|&O)?+{;P#Vx9VL0PRdMk>l>aiCWa17X|w_tW3qEksLUe3VZy z+(@1vt=MyK&!zu5+q}FByQfg(pMZoZ;`&CzlskL`J99dDGf0|+^HS%mgx;DPwOn&} zTS#+WF~f3UxW@3TKXH4*@#jBGe}64R7h}{hQAdTPhzSa|>Q!VAipO?O5As6S{52Hh zj;VjRf;bWpj8JiDv*)omq+i8_#-46Jg$v(;v7G)2m_x}XtrTwZT3%C=(Qim5!_sp8 z7K&lBU7&!~saGL&;coIhB_eVKX*VqGXlaZ%?s7`Kx$AXwJ 
z%phO86R)-wUzYiir-|)6n{;uRPMh{r?9w*={HN(3uZ7S9b3en#&MU-JCdE>T=W6nb zTT-_&#uuECH=2s@Yd?11)i{ua@a?>PzRE7I5G(jC+(C`;9jOZ3nkfv%^v2TO8l~dO zJMjW0s7y>(|2+NEt?#_&{P^=SpPx8C<^1>Q z->yIJPdxu)`uFS4Pk8M4pVNQb{`}NBxZX zb-(t-@BXL<8ph?{kDMNm|N8HLrrf;`|l69`Mi{l97OS6Bc5 diff --git a/server/__init__.py b/server/__init__.py index 244b868..d41954f 100644 --- a/server/__init__.py +++ b/server/__init__.py @@ -7,8 +7,8 @@ from server.views.api.api import route -def create_app(model_path, vocoder_path, speaker_ids_path, speech_speed, temperature, mp_workers, - use_cuda, use_mp, args) -> FastAPI: +def create_app(model_path, vocoder_path, speaker_ids_path, speech_speed, temperature, + use_cuda, args) -> FastAPI: app = FastAPI() @@ -20,9 +20,7 @@ async def startup_event(): speaker_ids_path=speaker_ids_path, speech_speed=speech_speed, temperature=temperature, - mp_workers=mp_workers, - use_cuda=use_cuda, - use_mp=use_mp, + use_cuda=use_cuda, unique_model=args.unique_model ) diff --git a/server/helper/config.py b/server/helper/config.py index 98d3520..0df70ea 100644 --- a/server/helper/config.py +++ b/server/helper/config.py @@ -5,44 +5,12 @@ from server.helper.singleton import Singleton from scripts.inference_onnx import load_onnx_tts -''' -class Config(metaclass=Singleton): - def __init__(self, model_path, config_path, speakers_file_path, - vocoder_path, vocoder_config_path, speaker_ids_path, - speech_speed, mp_workers, use_cuda, use_mp, show_details, args) -> None: - self.speech_speed = speech_speed - self.mp_workers = mp_workers - self.use_cuda = use_cuda - self.use_mp = use_mp - self.config_path = config_path - self.vocoder_config_path = vocoder_config_path - self.show_details = show_details - self.args = args - - self.synthesizer = Synthesizer( - tts_checkpoint=model_path, - tts_config_path=config_path, - tts_speakers_file=speakers_file_path, - tts_languages_file=None, - vocoder_checkpoint=vocoder_path, - vocoder_config=vocoder_config_path, - encoder_checkpoint="", - encoder_config="", - use_cuda=use_cuda - ) - - self.speakerConfigAttributes = SpeakerConfigAttributes(self.synthesizer, speaker_ids_path) -''' - - class ConfigONNX(metaclass=Singleton): def __init__(self, model_path, vocoder_path, speaker_ids_path, - speech_speed, temperature, mp_workers, use_cuda, use_mp, unique_model) -> None: + speech_speed, temperature, use_cuda, unique_model) -> None: self.speech_speed = speech_speed self.temperature = temperature - self.mp_workers = mp_workers self.use_cuda = use_cuda - self.use_mp = use_mp self.model_path = model_path self.vocoder_path = vocoder_path self.unique_model = unique_model @@ -53,44 +21,6 @@ def __init__(self, model_path, vocoder_path, speaker_ids_path, self.speakerConfigAttributes = SpeakerConfigAttributes(speaker_ids_path) -''' -class SpeakerConfigAttributes: - def __init__(self, synthesizer, speaker_ids_path) -> None: - self.use_multi_speaker = None - self.speaker_ids = None - self.speaker_manager = None - self.languages = None - self.new_speaker_ids = None - self.use_aliases = True - self.use_gst = None - - self.setup_speaker_attributes(synthesizer, speaker_ids_path) - - def setup_speaker_attributes(self, model, speaker_ids_path): - # global new_speaker_ids, use_aliases - - use_multi_speaker = hasattr(model.tts_model, "num_speakers") and ( - model.tts_model.num_speakers > 1 or model.tts_speakers_file is not None) - - speaker_manager = getattr(model.tts_model, "speaker_manager", None) - if speaker_manager: - self.new_speaker_ids = json.load(open(speaker_ids_path)) - - if self.use_aliases: - 
self.speaker_ids = self.new_speaker_ids - else: - self.speaker_ids = speaker_manager.ids - - self.languages = ['ca-es'] - - # TODO: set this from SpeakerManager - self.use_gst = model.tts_config.get("use_gst", False) - - self.use_multi_speaker = use_multi_speaker - self.speaker_manager = speaker_manager -''' - - class SpeakerConfigAttributes: def __init__(self, speaker_ids_path) -> None: self.use_multi_speaker = None @@ -116,6 +46,6 @@ def setup_speaker_attributes(self, speaker_ids_path): if self.use_aliases: self.speaker_ids = self.new_speaker_ids - self.languages = ['ca-es'] + self.languages = ['ca-es', 'ca-ba', 'ca-nw', 'ca-va'] self.use_multi_speaker = use_multi_speaker diff --git a/server/modules/tts_request_model.py b/server/modules/tts_request_model.py index 02c1bb1..6048a33 100644 --- a/server/modules/tts_request_model.py +++ b/server/modules/tts_request_model.py @@ -3,7 +3,7 @@ class TTSRequestModel(BaseModel): - language: Union[str, None] = "ca-es" # ca-ba, ca-nw, ca-va + language: Union[str, None] = "ca-es" # ca-ba, ca-nw, ca-va voice: str = Field(...) type: str = Field(...) text: str = Field(..., min_length=1) diff --git a/server/tests/base_test_case.py b/server/tests/base_test_case.py index 7522f29..c937a85 100644 --- a/server/tests/base_test_case.py +++ b/server/tests/base_test_case.py @@ -12,16 +12,14 @@ def setup(self): class configBaseTestCase: def setup(self): config = Config( - model_path="models/vits_ca/best_model.pth", + model_path="models/matxa_onnx/best_model.onnx", config_path="models/vits_ca/config.json", speakers_file_path=None, vocoder_path=None, vocoder_config_path=None, - speaker_ids_path="models/vits_ca/speaker_ids.json", + speaker_ids_path="models/matxa_onnx/speaker_ids.json", speech_speed=1.0, - mp_workers=1, use_cuda=False, - use_mp=False, show_details=True, args={} ) \ No newline at end of file diff --git a/server/views/api/api.py b/server/views/api/api.py index aa23019..b89c035 100644 --- a/server/views/api/api.py +++ b/server/views/api/api.py @@ -46,7 +46,7 @@ def index(request: Request): def parameters(): config = ConfigONNX() return JSONResponse( - content={"speech_speed": config.speech_speed, "mp_workers": config.mp_workers, "use_cuda": config.use_cuda, "use_mp": config.use_mp}, + content={"speech_speed": config.speech_speed, "use_cuda": config.use_cuda}, ) @@ -135,87 +135,17 @@ def tts(request: TTSRequestModel): temperature = config.temperature unique_model = config.unique_model - if config.use_cuda or not config.use_mp: - wavs = worker_onnx_audio(text, speaker_id=speaker_id, model_path=model_path, unique_model=unique_model, - vocoder_path=vocoder_path, - use_aliases=speaker_config_attributes["use_aliases"], - new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, - temperature=temperature, speaking_rate=speech_rate) - wavs = list(np.squeeze(wavs)) - out = io.BytesIO() - save_wav(wavs, out) + wavs = worker_onnx_audio(text, speaker_id=speaker_id, model_path=model_path, unique_model=unique_model, + vocoder_path=vocoder_path, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, + temperature=temperature, speaking_rate=speech_rate) - else: - - sentences = segmenter.segment(text) # list with pieces of long text input - print("sentences are segmented well...") - mp_workers = config.mp_workers # number of cpu's available for multiprocessing - manager = mp.Manager() # manager to deal with processes and cpu's available in the multiprocessing - 
print("manager initialized correctly...") - sessions = manager.list([None] * mp_workers) # create a list of ID's of sessions - print("list of sessions correctly set...") - print(len(sessions)) - - # global sessions - # sessions = [init_session_workers(model_path, use_cuda) for _ in range(num_cpus)] - - tasks = [(i % mp_workers, sentences[i]) for i in range(len(sentences))] - - print("tasks initialized...") - print(tasks) - - def worker_task(task): - session_index, sentence = task - - global sessions - - session = sessions[session_index] - - # session = list(sessions)[session_index] # this is the ONNX session I need to use for inference - - print("session called for inference...") - # print(session) - - wavs = worker_onnx(sentence, speaker_id=speaker_id, model=session, vocoder_model=None, - use_aliases=speaker_config_attributes["use_aliases"], - new_speaker_ids=speaker_config_attributes["new_speaker_ids"], - temperature=temperature, speaking_rate=speech_rate) - - return wavs - - with mp.Pool(processes=mp_workers) as pool: - pool.starmap(init_session_workers, [(model_path, sessions, i, use_cuda) for i in range(mp_workers)]) - - # preload all sessions according to the number of workers available (num. of cpu's) - # ort_sessions = [load_onnx_tts_unique(model_path=model_path, use_cuda=use_cuda) for _ in mp_workers] - - with mp.Pool(processes=mp_workers) as pool: - results = pool.map(worker_task, tasks) - - - ''' - worker_with_args = partial(worker_onnx_audio, speaker_id=speaker_id, model_path=model_path, - unique_model=unique_model, vocoder_path=vocoder_path, - use_aliases=speaker_config_attributes["use_aliases"], - new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, - temperature=temperature, speaking_rate=speech_rate) - - pool = mp.Pool(processes=mp_workers) - - results = pool.map(worker_with_args, [sentence.strip() for sentence in sentences if sentence]) - ''' - - list_of_results = [tensor.squeeze().tolist() for tensor in results] - # Close the pool to indicate that no more tasks will be submitted - pool.close() - # Wait for all processes to complete - pool.join() - merged_wavs = list(chain(*list_of_results)) - - out = io.BytesIO() - - save_wav(merged_wavs, out) + wavs = list(np.squeeze(wavs)) + out = io.BytesIO() + save_wav(wavs, out) + return StreamingResponse(out, media_type="audio/wav") diff --git a/server/workers/workers.py b/server/workers/workers.py index 53ba33c..737b01d 100644 --- a/server/workers/workers.py +++ b/server/workers/workers.py @@ -7,44 +7,6 @@ import numpy as np from scripts.inference_onnx import write_wav, load_onnx_tts, load_onnx_tts_unique -''' -def worker(sentence, speaker_id, model, use_aliases, new_speaker_ids): - def substitute_time(sentence): - # Regular expression to find time pattern (HH:MM) - time_pattern = re.compile(r'((?<=\s)\d{1,2}):(\d{2}(?=\s))') - - # Find all matches of time pattern in the sentence - matches = re.findall(time_pattern, sentence) - - if not matches: - return sentence - - sentence = re.sub(r'les\s+', '', sentence, count=1) - - # Iterate through matches and substitute with formatted time string - for match in matches: - H = int(match[0]) - M = int(match[1]) - dt = datetime.datetime(2017, 1, 31, H, M, 0, tzinfo=default_timezone()) # Using UTC timezone for simplicity - formatted_time = nice_time(dt, lang="ca", use_24hour=True) # Assuming you have a function to format time in Catalan - sentence = sentence.replace(f'{match[0]}:{match[1]}', formatted_time) - - return sentence - - sentence = 
substitute_time(sentence) - - print(" > Model input: {}".format(sentence)) - print(" > Speaker Idx: {}".format(speaker_id)) - - if use_aliases: - input_speaker_id = new_speaker_ids[speaker_id] - else: - input_speaker_id = speaker_id - - wavs = model.tts(sentence, input_speaker_id) - - return wavs -''' def worker_onnx(sentence, speaker_id, model, vocoder_model, use_aliases, new_speaker_ids, temperature, speaking_rate): From 4759c93ff39ba3c99f2af90ca533a95470011b93 Mon Sep 17 00:00:00 2001 From: PaulNdrei Date: Fri, 20 Sep 2024 16:12:16 +0200 Subject: [PATCH 09/19] Model download fix readme --- README.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index 0297387..8cfda88 100644 --- a/README.md +++ b/README.md @@ -110,12 +110,7 @@ https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matc Note: You will need a Huggingface account because the model privacity is setted to gated. Rename the onnx model to best_model.onnx and move it to /models/matxa_onnx folder -```bash -wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P models/vits_ca/ -``` -```bash -mv models/vits_ca/best_model_8khz.pth models/vits_ca/best_model.pth -``` + ## REST API Endpoints From 1206bc3844c92a6c05febc27c12ac19241553099 Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Mon, 23 Sep 2024 16:59:06 +0200 Subject: [PATCH 10/19] Update cleaners.py Set different phonemizers for each accent --- text/cleaners.py | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/text/cleaners.py b/text/cleaners.py index c0a5fdf..e6d174f 100644 --- a/text/cleaners.py +++ b/text/cleaners.py @@ -33,13 +33,19 @@ logger=critical_logger, ) -global_phonemizer_cat = phonemizer.backend.EspeakBackend( - language="ca", # 'ca' if catalan - preserve_punctuation=True, - with_stress=True, - language_switch="remove-flags", - logger=critical_logger, -) +# global_phonemizer_cat = phonemizer.backend.EspeakBackend( +# language="ca", # 'ca' if catalan +# preserve_punctuation=True, +# with_stress=True, +# language_switch="remove-flags", +# logger=critical_logger, +# ) + + +backend_cat = phonemizer.backend.EspeakBackend("ca", preserve_punctuation=True, with_stress=True) +backend_bal = phonemizer.backend.EspeakBackend("ca-ba", preserve_punctuation=True, with_stress=True) +backend_val = phonemizer.backend.EspeakBackend("ca-va", preserve_punctuation=True, with_stress=True) +backend_occ = phonemizer.backend.EspeakBackend("ca-nw", preserve_punctuation=True, with_stress=True) # Regular expression matching whitespace: _whitespace_re = re.compile(r"\s+") @@ -115,13 +121,12 @@ def english_cleaners2(text): def catalan_cleaners(text): - """Pipeline for Catalan text, including abbreviation expansion. + punctuation + stress""" - # text = convert_to_ascii(text) + """Pipeline for catalan text, including punctuation + stress""" + #text = convert_to_ascii(text) text = lowercase(text) - # text = expand_abbreviations(text) - phonemes = global_phonemizer_cat.phonemize([text], strip=True, njobs=1)[0] + #text = expand_abbreviations(text) + phonemes = backend_cat.phonemize([text], strip=True)[0] phonemes = collapse_whitespace(phonemes) - # print(phonemes) # check punctuations!! 
return phonemes @@ -130,7 +135,7 @@ def catalan_balear_cleaners(text): # text = convert_to_ascii(text) text = lowercase(text) # text = expand_abbreviations(text) - phonemes = global_phonemizer_cat_bal.phonemize([text], strip=True, njobs=1)[0] + phonemes = backend_bal.phonemize([text], strip=True, njobs=1)[0] phonemes = collapse_whitespace(phonemes) # print(phonemes) # check punctuations!! return phonemes @@ -141,7 +146,7 @@ def catalan_occidental_cleaners(text): # text = convert_to_ascii(text) text = lowercase(text) # text = expand_abbreviations(text) - phonemes = global_phonemizer_cat_occ.phonemize([text], strip=True, njobs=1)[0] + phonemes = backend_occ.phonemize([text], strip=True, njobs=1)[0] phonemes = collapse_whitespace(phonemes) # print(phonemes) # check punctuations!! return phonemes @@ -152,7 +157,7 @@ def catalan_valencia_cleaners(text): # text = convert_to_ascii(text) text = lowercase(text) # text = expand_abbreviations(text) - phonemes = global_phonemizer_cat_val.phonemize([text], strip=True, njobs=1)[0] + phonemes = backend_val.phonemize([text], strip=True, njobs=1)[0] phonemes = collapse_whitespace(phonemes) # print(phonemes) # check punctuations!! return phonemes From 5c5e3b5cff23c758b1e007ede393fbef998bf3de Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Mon, 23 Sep 2024 17:01:38 +0200 Subject: [PATCH 11/19] Update api.py call `worker_onnx_audio_multiaccent` function --- server/views/api/api.py | 98 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 87 insertions(+), 11 deletions(-) diff --git a/server/views/api/api.py b/server/views/api/api.py index b89c035..69d2845 100644 --- a/server/views/api/api.py +++ b/server/views/api/api.py @@ -22,7 +22,7 @@ from server.audio_utils.audio_utils import generate_audio, play_audio from server.exceptions import LanguageException, SpeakerException from server.helper.config import ConfigONNX -from server.workers.workers import worker_onnx, worker_onnx_audio +from server.workers.workers import worker_onnx, worker_onnx_audio, worker_onnx_audio_multiaccent from scripts.inference_onnx import save_wav, load_onnx_tts_unique @@ -46,7 +46,7 @@ def index(request: Request): def parameters(): config = ConfigONNX() return JSONResponse( - content={"speech_speed": config.speech_speed, "use_cuda": config.use_cuda}, + content={"speech_speed": config.speech_speed, "mp_workers": config.mp_workers, "use_cuda": config.use_cuda, "use_mp": config.use_mp}, ) @@ -135,17 +135,93 @@ def tts(request: TTSRequestModel): temperature = config.temperature unique_model = config.unique_model + if config.use_cuda or not config.use_mp: + # wavs = worker_onnx_audio(text, speaker_id=speaker_id, model_path=model_path, unique_model=unique_model, + # vocoder_path=vocoder_path, + # use_aliases=speaker_config_attributes["use_aliases"], + # new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, + # temperature=temperature, speaking_rate=speech_rate) - wavs = worker_onnx_audio(text, speaker_id=speaker_id, model_path=model_path, unique_model=unique_model, - vocoder_path=vocoder_path, - use_aliases=speaker_config_attributes["use_aliases"], - new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, - temperature=temperature, speaking_rate=speech_rate) + wavs = worker_onnx_audio_multiaccent(text, speaker_id=speaker_id, model_path=model_path, + unique_model=unique_model, vocoder_path=vocoder_path, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"], + 
use_cuda=use_cuda, temperature=temperature, speaking_rate=speech_rate) - wavs = list(np.squeeze(wavs)) - out = io.BytesIO() - save_wav(wavs, out) - + wavs = list(np.squeeze(wavs)) + out = io.BytesIO() + save_wav(wavs, out) + + else: + + sentences = segmenter.segment(text) # list with pieces of long text input + print("sentences are segmented well...") + mp_workers = config.mp_workers # number of cpu's available for multiprocessing + manager = mp.Manager() # manager to deal with processes and cpu's available in the multiprocessing + print("manager initialized correctly...") + sessions = manager.list([None] * mp_workers) # create a list of ID's of sessions + print("list of sessions correctly set...") + print(len(sessions)) + + # global sessions + # sessions = [init_session_workers(model_path, use_cuda) for _ in range(num_cpus)] + + tasks = [(i % mp_workers, sentences[i]) for i in range(len(sentences))] + + print("tasks initialized...") + print(tasks) + + def worker_task(task): + session_index, sentence = task + + global sessions + + session = sessions[session_index] + + # session = list(sessions)[session_index] # this is the ONNX session I need to use for inference + + print("session called for inference...") + # print(session) + + wavs = worker_onnx(sentence, speaker_id=speaker_id, model=session, vocoder_model=None, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"], + temperature=temperature, speaking_rate=speech_rate) + + return wavs + + with mp.Pool(processes=mp_workers) as pool: + pool.starmap(init_session_workers, [(model_path, sessions, i, use_cuda) for i in range(mp_workers)]) + + # preload all sessions according to the number of workers available (num. of cpu's) + # ort_sessions = [load_onnx_tts_unique(model_path=model_path, use_cuda=use_cuda) for _ in mp_workers] + + with mp.Pool(processes=mp_workers) as pool: + results = pool.map(worker_task, tasks) + + + ''' + worker_with_args = partial(worker_onnx_audio, speaker_id=speaker_id, model_path=model_path, + unique_model=unique_model, vocoder_path=vocoder_path, + use_aliases=speaker_config_attributes["use_aliases"], + new_speaker_ids=speaker_config_attributes["new_speaker_ids"], use_cuda=use_cuda, + temperature=temperature, speaking_rate=speech_rate) + + pool = mp.Pool(processes=mp_workers) + + results = pool.map(worker_with_args, [sentence.strip() for sentence in sentences if sentence]) + ''' + + list_of_results = [tensor.squeeze().tolist() for tensor in results] + # Close the pool to indicate that no more tasks will be submitted + pool.close() + # Wait for all processes to complete + pool.join() + merged_wavs = list(chain(*list_of_results)) + + out = io.BytesIO() + + save_wav(merged_wavs, out) return StreamingResponse(out, media_type="audio/wav") From 5a7ba09b3e6e802a84cb08664eac09e690724e6b Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Mon, 23 Sep 2024 17:03:22 +0200 Subject: [PATCH 12/19] Update workers.py Define multiaccent worker method --- server/workers/workers.py | 126 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/server/workers/workers.py b/server/workers/workers.py index 737b01d..e133a0e 100644 --- a/server/workers/workers.py +++ b/server/workers/workers.py @@ -8,6 +8,69 @@ from scripts.inference_onnx import write_wav, load_onnx_tts, load_onnx_tts_unique +cleaners = {"balear": "catalan_balear_cleaners", + "nord-occidental": "catalan_occidental_cleaners", + "valencia": "catalan_valencia_cleaners", + "central": 
"catalan_cleaners"} + +spk_id_accents_dict = { + "balear": { + "quim": 0, + "olga": 1 + }, + "central": { + "grau": 2, + "elia": 3 + }, + "nord-occidental": { + "pere": 4, + "emma": 5 + }, + "valencia": { + "lluc": 6, + "gina": 7 + } +} + +''' +def worker(sentence, speaker_id, model, use_aliases, new_speaker_ids): + def substitute_time(sentence): + # Regular expression to find time pattern (HH:MM) + time_pattern = re.compile(r'((?<=\s)\d{1,2}):(\d{2}(?=\s))') + + # Find all matches of time pattern in the sentence + matches = re.findall(time_pattern, sentence) + + if not matches: + return sentence + + sentence = re.sub(r'les\s+', '', sentence, count=1) + + # Iterate through matches and substitute with formatted time string + for match in matches: + H = int(match[0]) + M = int(match[1]) + dt = datetime.datetime(2017, 1, 31, H, M, 0, tzinfo=default_timezone()) # Using UTC timezone for simplicity + formatted_time = nice_time(dt, lang="ca", use_24hour=True) # Assuming you have a function to format time in Catalan + sentence = sentence.replace(f'{match[0]}:{match[1]}', formatted_time) + + return sentence + + sentence = substitute_time(sentence) + + print(" > Model input: {}".format(sentence)) + print(" > Speaker Idx: {}".format(speaker_id)) + + if use_aliases: + input_speaker_id = new_speaker_ids[speaker_id] + else: + input_speaker_id = speaker_id + + wavs = model.tts(sentence, input_speaker_id) + + return wavs +''' + def worker_onnx(sentence, speaker_id, model, vocoder_model, use_aliases, new_speaker_ids, temperature, speaking_rate): @@ -102,3 +165,66 @@ def intersperse(lst, item): } ''' return write_wav(model, inputs=inputs, output_dir='', external_vocoder=vocoder_model) + + +def worker_onnx_audio_multiaccent(sentence, speaker_id, model_path, unique_model, vocoder_path, use_aliases, + new_speaker_ids, use_cuda, temperature, speaking_rate): + + def intersperse(lst, item): + # Adds blank symbol + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + print(" > Model input: {}".format(sentence)) + print(" > Speaker Idx: {}".format(speaker_id)) + + if unique_model: + model = load_onnx_tts_unique(model_path=model_path, use_cuda=use_cuda) + vocoder_model = None + else: + model, vocoder_model = load_onnx_tts(model_path=model_path, vocoder_path=vocoder_path, use_cuda=use_cuda) + + accent = "" + input_speaker_id = 10000 + + print(speaker_id) + + if use_aliases: + for acc in spk_id_accents_dict.keys(): + print(acc) + if speaker_id in spk_id_accents_dict[acc].keys(): + input_speaker_id = spk_id_accents_dict[acc][speaker_id] + print(input_speaker_id) + accent = acc + else: + input_speaker_id = speaker_id + + x = torch.tensor( + intersperse(text_to_sequence(sentence, [cleaners[accent]]), 0), + dtype=torch.long, + device="cpu", + )[None] + + x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True) + x = x.detach().cpu().numpy() + + x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device="cpu") + x_lengths = np.array([x_lengths.item()], dtype=np.int64) + + inputs = { + "x": x, + "x_lengths": x_lengths, + "scales": np.array([temperature, speaking_rate], dtype=np.float32), + "spks": np.repeat(input_speaker_id, x.shape[0]).astype(np.int64) + } + + ''' + inputs = { + "model1_x": x, + "model1_x_lengths": x_lengths, + "model1_scales": np.array([temperature, speaking_rate], dtype=np.float32), + "model1_spks": np.repeat(input_speaker_id, x.shape[0]).astype(np.int64) + } + ''' + return write_wav(model, inputs=inputs, output_dir='', external_vocoder=vocoder_model) From 
8f234c198ae9fcc35e792ec593bd0d4fe08d400e Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Mon, 23 Sep 2024 17:06:55 +0200 Subject: [PATCH 13/19] Add multiaccented speaker ID dictionary --- models/matxa_onnx/spk_ids_lafresca.json | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 models/matxa_onnx/spk_ids_lafresca.json diff --git a/models/matxa_onnx/spk_ids_lafresca.json b/models/matxa_onnx/spk_ids_lafresca.json new file mode 100644 index 0000000..044fa6c --- /dev/null +++ b/models/matxa_onnx/spk_ids_lafresca.json @@ -0,0 +1,10 @@ +{ + "quim": 0, + "olga": 1, + "grau": 2, + "elia": 3, + "pere": 4, + "emma": 5, + "lluc": 6, + "gina": 7 +} From 4ca2b3304c7d7f667583ed7b8bf9a5ca1a685d62 Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Fri, 27 Sep 2024 15:00:52 +0200 Subject: [PATCH 14/19] Update spk_ids.json changed spk ID dictionary to the multiaccented version --- models/matxa_onnx/spk_ids.json | 57 ++++++---------------------------- 1 file changed, 9 insertions(+), 48 deletions(-) diff --git a/models/matxa_onnx/spk_ids.json b/models/matxa_onnx/spk_ids.json index 69f2405..044fa6c 100644 --- a/models/matxa_onnx/spk_ids.json +++ b/models/matxa_onnx/spk_ids.json @@ -1,49 +1,10 @@ { - "cam_03115": 0, - "caf_04247": 1, - "caf_05450": 2, - "cam_08935": 3, - "caf_09901": 4, - "ona": 5, - "pol": 6, - "cam_02689": 7, - "caf_06042": 8, - "jan": 9, - "caf_08106": 10, - "cam_04910": 11, - "cam_08664": 12, - "caf_07803": 13, - "cam_06582": 14, - "caf_06311": 15, - "caf_07245": 16, - "cam_06279": 17, - "caf_09598": 18, - "caf_09796": 19, - "eva": 20, - "cam_00762": 21, - "caf_09204": 22, - "caf_03944": 23, - "caf_05147": 24, - "uri": 25, - "mar": 26, - "cam_00459": 27, - "teo": 28, - "caf_03655": 29, - "bet": 30, - "cam_06705": 31, - "caf_05739": 32, - "caf_06008": 33, - "cam_04484": 34, - "cam_03386": 35, - "cam_08967": 36, - "caf_06942": 37, - "cam_07140": 38, - "pau": 39, - "caf_08001": 40, - "pep": 41, - "cam_04787": 42, - "eli": 43, - "caf_01591": 44, - "caf_02452": 45, - "cam_02992": 46 -} \ No newline at end of file + "quim": 0, + "olga": 1, + "grau": 2, + "elia": 3, + "pere": 4, + "emma": 5, + "lluc": 6, + "gina": 7 +} From 84f954b8d9ae4788c17f1c4c9b5d840b24aa685e Mon Sep 17 00:00:00 2001 From: AlexPeiroLilja Date: Fri, 27 Sep 2024 15:01:14 +0200 Subject: [PATCH 15/19] Delete models/matxa_onnx/spk_ids_lafresca.json --- models/matxa_onnx/spk_ids_lafresca.json | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 models/matxa_onnx/spk_ids_lafresca.json diff --git a/models/matxa_onnx/spk_ids_lafresca.json b/models/matxa_onnx/spk_ids_lafresca.json deleted file mode 100644 index 044fa6c..0000000 --- a/models/matxa_onnx/spk_ids_lafresca.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "quim": 0, - "olga": 1, - "grau": 2, - "elia": 3, - "pere": 4, - "emma": 5, - "lluc": 6, - "gina": 7 -} From 7e2228800a9aa218ca2edb5c05403ae4e765ba76 Mon Sep 17 00:00:00 2001 From: PaulNdrei Date: Fri, 27 Sep 2024 19:08:33 +0200 Subject: [PATCH 16/19] WIP --- .dockerignore | 3 +- .github/workflows/docker-image.yml | 2 + .github/workflows/tests.yml | 11 +-- Dockerfile | 11 ++- Dockerfile.dev | 13 +-- Makefile | 2 +- README.md | 40 +++++---- charts/aina-tts-api/Chart.yaml | 7 +- docker-compose-dev.yml | 7 +- docker-compose-gpu.yml | 8 +- docker-compose-test.yml | 3 +- docker-compose.yml | 7 +- scripts/change_model.py | 14 --- server/modules/tts_request_model.py | 4 +- server/templates/index.html | 6 +- server/tests/api/views/test_api.py | 8 +- server/tests/base_test_case.py | 17 ++-- 
server/tests/test_config.py | 6 +- server/tests/test_worker.py | 20 +++-- server/views/api/api.py | 127 +++++++++++++--------------- 20 files changed, 160 insertions(+), 156 deletions(-) delete mode 100644 scripts/change_model.py diff --git a/.dockerignore b/.dockerignore index e5341f8..1e3a950 100644 --- a/.dockerignore +++ b/.dockerignore @@ -33,4 +33,5 @@ _main.py # Exclude specific files or directories if needed for the Docker build # !important_file.py -# !important_directory/ \ No newline at end of file +# !important_directory/ +models/matxa_onnx/best_model.onnx \ No newline at end of file diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 9e55b73..f0c0306 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -30,5 +30,7 @@ jobs: uses: docker/build-push-action@v5 with: context: . + secrets: | + HF_TOKEN=${{ secrets.HF_TOKEN }} push: true tags: projecteaina/tts-api:latest, projecteaina/tts-api:${{ github.sha }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a7de90f..f51e4ca 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -41,18 +41,13 @@ jobs: - name: install espeak-ng run: | - git clone -b dev-ca https://github.com/projecte-aina/espeak-ng + git clone https://github.com/espeak-ng/espeak-ng pip install --upgrade pip cd espeak-ng && sudo ./autogen.sh && sudo ./configure --prefix=/usr && sudo make && sudo make install - - name: install lingua-franca - run: pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c - - - name: install model + - name: download model run: | - wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P ./models/vits_ca/ - mv ./models/vits_ca/best_model_8khz.pth ./models/vits_ca/best_model.pth - + wget --header="Authorization: Bearer ${{ secrets.HF_TOKEN }}" https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matxa_multiaccent_wavenext_e2e.onnx -O ./models/matxa_onnx/best_model.onnx - name: Run tests run: pytest diff --git a/Dockerfile b/Dockerfile index 88deb52..a01668b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ FROM python:3.10.12-slim -# RUN apt-get update && apt-get install -y --no-install-recommends wget gcc g++ make python3 python3-dev python3-pip python3-venv python3-wheel espeak espeak-ng libsndfile1-dev && rm -rf /var/lib/apt/lists/* # Install required packages for building eSpeak and general utilities + RUN apt-get update && apt-get install -y \ build-essential \ autoconf \ @@ -13,8 +13,15 @@ RUN apt-get update && apt-get install -y \ cmake \ && rm -rf /var/lib/apt/lists/* -RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng +# download huggingface gated model +RUN mkdir -p /app/models/matxa_onnx + +RUN --mount=type=secret,id=HF_TOKEN \ + wget --header="Authorization: Bearer $(cat /run/secrets/HF_TOKEN)" https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matxa_multiaccent_wavenext_e2e.onnx -O /app/models/matxa_onnx/best_model.onnx + +# install espeak-ng +RUN git clone https://github.com/espeak-ng/espeak-ng RUN pip install --upgrade pip && \ cd espeak-ng && \ ./autogen.sh && \ diff --git a/Dockerfile.dev b/Dockerfile.dev index d345f6e..e284a07 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -13,7 +13,14 @@ RUN apt-get update && apt-get install -y \ cmake \ && rm -rf /var/lib/apt/lists/* -RUN git clone -b dev-ca https://github.com/projecte-aina/espeak-ng +# download huggingface gated 
model +RUN mkdir -p /app/models/matxa_onnx + +RUN --mount=type=secret,id=HF_TOKEN \ + wget --header="Authorization: Bearer $(cat /run/secrets/HF_TOKEN)" https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matxa_multiaccent_wavenext_e2e.onnx -O /app/models/matxa_onnx/best_model.onnx + +RUN git clone https://github.com/espeak-ng/espeak-ng + RUN pip install --upgrade pip && \ cd espeak-ng && \ @@ -22,11 +29,7 @@ RUN pip install --upgrade pip && \ make && \ make install -RUN pip install git+https://github.com/MycroftAI/lingua-franca.git@5bfd75fe5996fd364102a0eec3f714c9ddc9275c - WORKDIR /app -# RUN wget -q http://share.laklak.eu/model_vits_ca/best_model_8khz.pth -P /app/models/vits_ca/ -# RUN mv /app/models/vits_ca/best_model_8khz.pth /app/models/vits_ca/best_model.pth COPY ./requirements.txt /app RUN python -m pip install --upgrade pip diff --git a/Makefile b/Makefile index 2315c6a..299f07d 100644 --- a/Makefile +++ b/Makefile @@ -15,4 +15,4 @@ stop: act-run-tests: - gh act -j test -W '.github/workflows/tests.yml' \ No newline at end of file + act --secret-file .env -j test -W '.github/workflows/tests.yml' \ No newline at end of file diff --git a/README.md b/README.md index 8cfda88..2efc2a5 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ # TTS API -RestFUL api and web interface to serve coqui TTS models +RestFUL api and web interface to serve matcha TTS models ## Installation -The requirements are tested for python 3.10. In order for coqui TTS to work, some dependencies should be installed. +The requirements are tested for python 3.10. In order for matcha TTS to work, some dependencies should be installed. 1. Update your system's package list and install the required packages for building eSpeak and general utilities: ```bash @@ -18,9 +18,10 @@ sudo apt update && sudo apt install -y \ wget \ cmake ``` + 2. Clone the eSpeak-ng repository and build it: ```bash -git clone -b dev-ca https://github.com/projecte-aina/espeak-ng +git clone https://github.com/espeak-ng/espeak-ng cd espeak-ng && \ sudo ./autogen.sh && \ sudo ./configure --prefix=/usr && \ @@ -34,14 +35,29 @@ Later simply: python -m pip install --upgrade pip ``` -In order to synthesize, the actual model needs to be downloaded and the paths in the config file need to be changed (replacing `/opt` with the top directory of the repository). The model can be downloaded from [http://share.laklak.eu/model_vits_ca/best_model.pth](http://share.laklak.eu/model_vits_ca/best_model.pth) to the models directory. + +> [!NOTE] +> The model **best_model.onnx** is requiered, you have to download by yourself. + +Download the model from HuggingFace +https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matcha_multispeaker_cat_all_opset_15_10_steps.onnx + +Note: You will need a Huggingface account because the model privacity is setted to gated. + +Rename the onnx model to best_model.onnx and move it to ./models/matxa_onnx folder + +or download using wget + +```bash +wget --header="Authorization: Bearer REPLACE_WITH_YOUR_HF_TOKEN" https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matxa_multiaccent_wavenext_e2e.onnx -O ./models/matxa_onnx/best_model.onnx +``` ## Launch tts-api uses `FastAPI` and `uvicorn` under the hood. 
For now, in order to launch: ``` -python server/server.py --model_path models/vits_ca/best_model.pth --config_path models/vits_ca/config.json --port 8001 +python server/server.py --model_path models/matxa_onnx/best_model.onnx --port 8001 ``` that receives the calls from `0.0.0.0:8001`, or simply ``` @@ -60,7 +76,7 @@ The example for `/api/tts` can be found in `/docs`. The websocket request is con ``` curl --location --request POST 'http://localhost:8000/api/tts' --header 'Content-Type: application/json' --data-raw '{ - "voice": "f_cen_81", + "voice": "quim", "type": "text", "text": "El Consell s’ha reunit avui per darrera vegada abans de les eleccions. Divendres vinent, tant el president com els consellers ja estaran en funcions. A l’ordre del dia d’avui tampoc no hi havia l’aprovació del requisit lingüístic, és a dir la normativa que ha de regular la capacitació lingüística dels aspirants a accedir a un lloc en la Funció Pública Valenciana.", "language": "ca-es" }' --output tts.wav @@ -101,15 +117,6 @@ To run in dev mode run the following command. make dev ``` -> [!NOTE] -> The model **best_model.onnx** is requiered, you have to download by yourself. - -Download the model from HuggingFace -https://huggingface.co/projecte-aina/matxa-tts-cat-multiaccent/resolve/main/matcha_multispeaker_cat_all_opset_15_10_steps.onnx - -Note: You will need a Huggingface account because the model privacity is setted to gated. - -Rename the onnx model to best_model.onnx and move it to /models/matxa_onnx folder ## REST API Endpoints @@ -122,7 +129,7 @@ Rename the onnx model to best_model.onnx and move it to /models/matxa_onnx folde | **Parameter** | **Type** | **Description** | |---------------|--------------------|------------------------------------------------------------| -| language | string | ISO language code (e.g., "ca-es") | +| language | string | ISO language code (e.g., "ca-es", "ca-ba", "ca-nw", "ca-va") | | voice | string | Name of the voice to use | | type | string | Type of input text ("text" or "ssml") | | text | string | Text to be synthesized (if type is "ssml", enclose in tags) | @@ -130,7 +137,6 @@ Rename the onnx model to best_model.onnx and move it to /models/matxa_onnx folde **NOTES:** - ssml format is not available yet. -- Currently, only "ca-ba, ca-nw, ca-va" directions are supported, and will be applied by default **Successful Response:** diff --git a/charts/aina-tts-api/Chart.yaml b/charts/aina-tts-api/Chart.yaml index 51a3af3..b478760 100644 --- a/charts/aina-tts-api/Chart.yaml +++ b/charts/aina-tts-api/Chart.yaml @@ -1,13 +1,14 @@ apiVersion: v2 name: aina-tts-api -version: 0.1.0 -description: RestFUL api and web interface to serve coqui TTS models +version: 0.2.0 +description: RestFUL api and web interface to serve matcha TTS models home: https://github.com/projecte-aina/tts-api keywords: - ai - tts - - coqui + - matcha + - matxa - cat maintainers: - name: Projecte Aina diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 7459165..b738da1 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -1,9 +1,10 @@ -version: '3.9' services: server: build: context: . 
dockerfile: Dockerfile.dev + secrets: + - "HF_TOKEN" container_name: fastapi-dev environment: - SPEECH_SPEED=${SPEECH_SPEED-1.0} @@ -12,4 +13,6 @@ services: - .:/app ports: - '8000:8000' - shm_size: ${SHM_SIZE-2gb} +secrets: + HF_TOKEN: + environment: "HF_TOKEN" \ No newline at end of file diff --git a/docker-compose-gpu.yml b/docker-compose-gpu.yml index 1074a5f..6814865 100644 --- a/docker-compose-gpu.yml +++ b/docker-compose-gpu.yml @@ -3,17 +3,21 @@ services: server: build: context: . + secrets: + - "HF_TOKEN" environment: - SPEECH_SPEED=${SPEECH_SPEED} - USE_CUDA=True restart: unless-stopped ports: - '8000:8000' - shm_size: ${SHM_SIZE} deploy: resources: reservations: devices: - driver: nvidia count: all - capabilities: [gpu] \ No newline at end of file + capabilities: [gpu] +secrets: + HF_TOKEN: + environment: "HF_TOKEN" \ No newline at end of file diff --git a/docker-compose-test.yml b/docker-compose-test.yml index 64019d9..faddd26 100644 --- a/docker-compose-test.yml +++ b/docker-compose-test.yml @@ -9,5 +9,4 @@ services: volumes: - .:/app ports: - - '8000:8000' - shm_size: ${SHM_SIZE-2gb} + - '8000:8000' \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 5a81d57..23762f2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,12 +1,15 @@ -version: '3.9' services: server: build: context: . + secrets: + - "HF_TOKEN" restart: unless-stopped ports: - '8000:8000' - shm_size: ${SHM_SIZE:-2g} environment: - SPEECH_SPEED=${SPEECH_SPEED:-0.9} # Default to 0.9 if SPEECH_SPEED is not set - USE_CUDA=${USE_CUDA:-0} # Default to 0 if USE_CUDA is not set +secrets: + HF_TOKEN: + environment: "HF_TOKEN" \ No newline at end of file diff --git a/scripts/change_model.py b/scripts/change_model.py deleted file mode 100644 index d0783f5..0000000 --- a/scripts/change_model.py +++ /dev/null @@ -1,14 +0,0 @@ -import torch -import json - -speakers = torch.load('../models/vits_ca/speakers.pth') -print(type(speakers)) -conv = [line.strip().split(',') for line in open('speakers_conversion.csv').readlines()] -new_speakers = {} -for source, target in conv: - id = speakers.get(source) - if id: - new_speakers[target] = source -with open('speaker_ids.json', 'w') as out: - json.dump(new_speakers, out) -print(new_speakers) diff --git a/server/modules/tts_request_model.py b/server/modules/tts_request_model.py index 6048a33..e1aa539 100644 --- a/server/modules/tts_request_model.py +++ b/server/modules/tts_request_model.py @@ -12,8 +12,8 @@ class TTSRequestModel(BaseModel): "json_schema_extra": { "examples": [ { - "language": "Foo", - "voice": "f_cen_095", + "language": "ca-es", + "voice": "olga", "type": "text", "text": "hola que tal" } diff --git a/server/templates/index.html b/server/templates/index.html index 67f46b4..0e9b7ab 100644 --- a/server/templates/index.html +++ b/server/templates/index.html @@ -5,8 +5,8 @@ - - + + TTS engine @@ -35,7 +35,7 @@ - + + - - - - - -

[The template diff hunks here (server/templates/index.html and what appears to be the WebSocket demo template) lost their HTML markup during extraction; only the visible strings "WebSocket Audio Streaming", the {%if use_gst%} / {%if use_multi_speaker%} template conditionals, and the speaker-selection label "Trieu un locutor:" ("Choose a speaker") survive.]
- - - - - - - \ No newline at end of file diff --git a/server/views/api/api.py b/server/views/api/api.py index 04b47c9..50e0cd7 100644 --- a/server/views/api/api.py +++ b/server/views/api/api.py @@ -7,7 +7,6 @@ import numpy as np from fastapi import APIRouter from starlette.responses import JSONResponse -from starlette.websockets import WebSocket from fastapi.responses import StreamingResponse, HTMLResponse from fastapi import Request from functools import partial @@ -19,7 +18,6 @@ # from TTS.config import load_config from server.modules.tts_request_model import TTSRequestModel -from server.audio_utils.audio_utils import generate_audio, play_audio from server.exceptions import LanguageException, SpeakerException from server.helper.config import ConfigONNX from server.workers.workers import worker_onnx_audio_multiaccent @@ -49,12 +47,6 @@ def parameters(): content={"speech_speed": config.speech_speed, "use_cuda": config.use_cuda}, ) - -@route.get("/websocket-demo", response_class=HTMLResponse) -def websocket_demo(request: Request): - speaker_config_attributes = ConfigONNX().speakerConfigAttributes.__dict__ - return templates.TemplateResponse("websocket_demo.html",{"request": request, **speaker_config_attributes}) - ''' @route.get("/details", response_class=HTMLResponse) def details(request: Request): @@ -217,30 +209,3 @@ def tts(request: TTSRequestModel): # save_wav(merged_wavs, out) return StreamingResponse(out, media_type="audio/wav") - - -@route.websocket_route("/audio-stream") -async def stream_audio(websocket: WebSocket): - await websocket.accept() - - audio_queue = asyncio.Queue() - - try: - while True: - received_data = await websocket.receive_json() - - sentences = segmenter.segment(received_data.get("text")) - voice = received_data.get("voice") - language = received_data.get("language") - - # create a separate task for audio generation - generator_task = asyncio.create_task(generate_audio(sentences, voice, langauge, audio_queue)) - - # create a task for audio playing - player_task = asyncio.create_task(play_audio(audio_queue, websocket)) - - # wait for both tasks to complete - await asyncio.gather(generator_task, player_task) - - except Exception as e: - traceback.print_exc()
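For reference, a minimal sketch of the speaker-to-accent resolution that `worker_onnx_audio_multiaccent` performs before building its ONNX inputs. The `cleaners` and `spk_id_accents_dict` dictionaries are copied from the `server/workers/workers.py` hunks above; the `resolve_speaker` helper and its fallback values are illustrative only and are not part of the repository, and the ONNX session, `text_to_sequence` and vocoder steps are omitted.

```python
# Sketch (not repository code) of the accent resolution used by
# worker_onnx_audio_multiaccent: a speaker alias is mapped to its accent,
# its numeric speaker ID, and the text cleaner used for phonemization.

# Copied from server/workers/workers.py as introduced in this patch series.
cleaners = {
    "balear": "catalan_balear_cleaners",
    "nord-occidental": "catalan_occidental_cleaners",
    "valencia": "catalan_valencia_cleaners",
    "central": "catalan_cleaners",
}

spk_id_accents_dict = {
    "balear": {"quim": 0, "olga": 1},
    "central": {"grau": 2, "elia": 3},
    "nord-occidental": {"pere": 4, "emma": 5},
    "valencia": {"lluc": 6, "gina": 7},
}


def resolve_speaker(speaker_id, use_aliases=True):
    """Return (numeric_speaker_id, accent, cleaner_name) for a speaker alias.

    Illustrative helper: the worker in the patch inlines this logic and uses
    the sentinel value 10000 for speakers that are not found.
    """
    if not use_aliases:
        # Without aliases the caller already passes the numeric ID; the patch
        # leaves the accent empty in that case, so "central" is assumed here
        # purely to keep the sketch runnable.
        return speaker_id, "central", cleaners["central"]

    for accent, speakers in spk_id_accents_dict.items():
        if speaker_id in speakers:
            return speakers[speaker_id], accent, cleaners[accent]

    # Same sentinel the patch uses when the alias is unknown.
    return 10000, "", None


if __name__ == "__main__":
    for voice in ("quim", "elia", "emma", "gina"):
        print(voice, resolve_speaker(voice))
    # quim (0, 'balear', 'catalan_balear_cleaners')
    # elia (3, 'central', 'catalan_cleaners')
    # emma (5, 'nord-occidental', 'catalan_occidental_cleaners')
    # gina (7, 'valencia', 'catalan_valencia_cleaners')
```

One wrinkle visible in the patch itself: when `use_aliases` is false the accent string stays empty, so `cleaners[accent]` would raise a `KeyError`; the sketch substitutes the central cleaner in that branch only so the example runs end to end.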