-
-
Notifications
You must be signed in to change notification settings - Fork 2.2k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
10 changed files
with
344 additions
and
3 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,19 @@ | ||
# Running `make` with no target builds the protobuf stubs and installs deps.
.DEFAULT_GOAL := install

# Install the backend's Python dependencies (after generating gRPC stubs).
.PHONY: install
install: protogen
	bash install.sh

# Generate the Python gRPC stubs from backend.proto.
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py

# Remove only the generated protobuf stub modules.
.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

# backend.proto lives two directories up (-I../..); emit both the message
# module (backend_pb2.py) and the service module (backend_pb2_grpc.py) here.
backend_pb2_grpc.py backend_pb2.py:
	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto

# Full cleanup: generated stubs plus the virtualenv and bytecode caches.
.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,155 @@ | ||
#!/usr/bin/env python3 | ||
""" | ||
Extra gRPC server for OpenVoice models. | ||
""" | ||
from concurrent import futures | ||
|
||
import argparse | ||
import signal | ||
import sys | ||
import os | ||
import torch | ||
from openvoice import se_extractor | ||
from openvoice.api import ToneColorConverter | ||
from melo.api import TTS | ||
|
||
import time | ||
import backend_pb2 | ||
import backend_pb2_grpc | ||
|
||
import grpc | ||
|
||
|
||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24 | ||
|
||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1 | ||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1')) | ||
|
||
# Implement the BackendServicer class with the service methods | ||
class BackendServicer(backend_pb2_grpc.BackendServicer): | ||
""" | ||
A gRPC servicer for the backend service. | ||
This class implements the gRPC methods for the backend service, including Health, LoadModel, and Embedding. | ||
""" | ||
def Health(self, request, context): | ||
""" | ||
A gRPC method that returns the health status of the backend service. | ||
Args: | ||
request: A HealthRequest object that contains the request parameters. | ||
context: A grpc.ServicerContext object that provides information about the RPC. | ||
Returns: | ||
A Reply object that contains the health status of the backend service. | ||
""" | ||
return backend_pb2.Reply(message=bytes("OK", 'utf-8')) | ||
|
||
def LoadModel(self, request, context): | ||
""" | ||
A gRPC method that loads a model into memory. | ||
Args: | ||
request: A LoadModelRequest object that contains the request parameters. | ||
context: A grpc.ServicerContext object that provides information about the RPC. | ||
Returns: | ||
A Result object that contains the result of the LoadModel operation. | ||
""" | ||
model_name = request.Model | ||
try: | ||
|
||
self.clonedVoice = False | ||
# Assume directory from request.ModelFile. | ||
# Only if request.LoraAdapter it's not an absolute path | ||
if request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath): | ||
# get base path of modelFile | ||
modelFileBase = os.path.dirname(request.ModelFile) | ||
request.AudioPath = os.path.join(modelFileBase, request.AudioPath) | ||
if request.AudioPath != "": | ||
self.clonedVoice = True | ||
|
||
self.modelpath = request.ModelFile | ||
self.speaker = request.Type | ||
self.ClonedVoicePath = request.AudioPath | ||
|
||
ckpt_converter = request.Model+'/converter' | ||
device = "cuda:0" if torch.cuda.is_available() else "cpu" | ||
self.device = device | ||
self.tone_color_converter = None | ||
if self.clonedVoice: | ||
self.tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device) | ||
self.tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth') | ||
|
||
except Exception as err: | ||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") | ||
|
||
return backend_pb2.Result(message="Model loaded successfully", success=True) | ||
|
||
def TTS(self, request, context): | ||
model_name = request.model | ||
if model_name == "": | ||
return backend_pb2.Result(success=False, message="request.model is required") | ||
try: | ||
# Speed is adjustable | ||
speed = 1.0 | ||
model = TTS(language=request.voice, device=self.device) | ||
speaker_ids = model.hps.data.spk2id | ||
speaker_key = self.speaker | ||
modelpath = self.modelpath | ||
for s in speaker_ids.keys(): | ||
print(f"Speaker: {s} - ID: {speaker_ids[s]}") | ||
speaker_id = speaker_ids[speaker_key] | ||
speaker_key = speaker_key.lower().replace('_', '-') | ||
source_se = torch.load(f'{modelpath}/base_speakers/ses/{speaker_key}.pth', map_location=self.device) | ||
model.tts_to_file(request.text, speaker_id, request.dst, speed=speed) | ||
if self.clonedVoice: | ||
reference_speaker = self.ClonedVoicePath | ||
target_se, audio_name = se_extractor.get_se(reference_speaker, self.tone_color_converter, vad=False) | ||
# Run the tone color converter | ||
encode_message = "@MyShell" | ||
self.tone_color_converter.convert( | ||
audio_src_path=request.dst, | ||
src_se=source_se, | ||
tgt_se=target_se, | ||
output_path=request.dst, | ||
message=encode_message) | ||
|
||
print("[OpenVoice] TTS generated!", file=sys.stderr) | ||
print("[OpenVoice] TTS saved to", request.dst, file=sys.stderr) | ||
print(request, file=sys.stderr) | ||
except Exception as err: | ||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") | ||
return backend_pb2.Result(success=True) | ||
|
||
def serve(address):
    """Start the gRPC backend server on *address* and block until stopped."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), grpc_server)
    grpc_server.add_insecure_port(address)
    grpc_server.start()
    print("[OpenVoice] Server started. Listening on: " + address, file=sys.stderr)

    def shutdown_handler(sig, frame):
        # Stop the server and exit cleanly on SIGINT/SIGTERM.
        print("[OpenVoice] Received termination signal. Shutting down...")
        grpc_server.stop(0)
        sys.exit(0)

    # Install the same handler for both interrupt and terminate signals.
    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, shutdown_handler)

    # gRPC serves on background threads; park the main thread forever.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        grpc_server.stop(0)
||
if __name__ == "__main__":
    # CLI entry point: parse the bind address and launch the gRPC server.
    arg_parser = argparse.ArgumentParser(description="Run the gRPC server.")
    arg_parser.add_argument(
        "--addr", default="localhost:50051", help="The address to bind the server to."
    )
    cli_args = arg_parser.parse_args()
    print(f"[OpenVoice] startup: {cli_args}", file=sys.stderr)
    serve(cli_args.addr)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,16 @@ | ||
#!/bin/bash
set -e

# Load the shared backend helpers (installRequirements, build profiles).
# Quote the path so the script also works from directories with spaces.
source "$(dirname "$0")/../common/libbackend.sh"

# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

installRequirements

# MeloTTS requires the unidic dictionary for Japanese text processing.
python -m unidic download
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,23 @@ | ||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ | ||
intel-extension-for-pytorch | ||
torch | ||
optimum[openvino] | ||
grpcio==1.63.0 | ||
protobuf | ||
librosa==0.9.1 | ||
faster-whisper==0.9.0 | ||
pydub==0.25.1 | ||
wavmark==0.0.3 | ||
numpy==1.22.0 | ||
eng_to_ipa==0.0.2 | ||
inflect==7.0.0 | ||
unidecode==1.3.7 | ||
whisper-timestamped==1.14.2 | ||
openai | ||
python-dotenv | ||
pypinyin==0.50.0 | ||
cn2an==0.5.22 | ||
jieba==0.42.1 | ||
gradio==3.48.0 | ||
langid==1.1.6 | ||
git+https://github.com/myshell-ai/MeloTTS.git |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,19 @@ | ||
grpcio==1.63.0 | ||
protobuf | ||
librosa==0.9.1 | ||
faster-whisper==0.9.0 | ||
pydub==0.25.1 | ||
wavmark==0.0.3 | ||
numpy==1.22.0 | ||
eng_to_ipa==0.0.2 | ||
inflect==7.0.0 | ||
unidecode==1.3.7 | ||
whisper-timestamped==1.14.2 | ||
openai | ||
python-dotenv | ||
pypinyin==0.50.0 | ||
cn2an==0.5.22 | ||
jieba==0.42.1 | ||
gradio==3.48.0 | ||
langid==1.1.6 | ||
git+https://github.com/myshell-ai/MeloTTS.git |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,4 @@ | ||
#!/bin/bash
# Launch the OpenVoice gRPC backend via the shared helper library.
# Quote the source path so directories containing spaces don't break it.
source "$(dirname "$0")/../common/libbackend.sh"

# Use "$@" (quoted) so arguments containing spaces are forwarded intact;
# the original unquoted $@ would word-split them.
startBackend "$@"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,81 @@ | ||
""" | ||
A test script to test the gRPC service | ||
""" | ||
import unittest | ||
import subprocess | ||
import time | ||
import backend_pb2 | ||
import backend_pb2_grpc | ||
|
||
import grpc | ||
|
||
|
||
class TestBackendServicer(unittest.TestCase):
    """
    TestBackendServicer tests the gRPC service end to end by spawning the
    backend server as a subprocess and talking to it over localhost.

    unittest invokes setUp()/tearDown() around every test automatically, so
    the tests themselves must NOT call them again: the original version did,
    which spawned a second server subprocess on the same port per test and
    terminated the same process twice.
    """

    def setUp(self):
        """
        Start the gRPC server subprocess and give it time to come up.
        """
        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
        # NOTE(review): a fixed sleep is racy; polling Health until it
        # answers would be more robust. Kept at 10s to match prior timing.
        time.sleep(10)

    def tearDown(self) -> None:
        """
        Terminate the gRPC server subprocess and wait for it to exit.
        """
        self.service.terminate()
        self.service.wait()

    def test_server_startup(self):
        """
        This method tests if the server starts up successfully
        """
        try:
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.Health(backend_pb2.HealthMessage())
                self.assertEqual(response.message, b'OK')
        except Exception as err:
            print(err)
            self.fail("Server failed to start")

    def test_load_model(self):
        """
        This method tests if the model is loaded successfully
        """
        try:
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.LoadModel(backend_pb2.ModelOptions(Model="checkpoints_v2"))
                self.assertTrue(response.success)
                self.assertEqual(response.message, "Model loaded successfully")
        except Exception as err:
            print(err)
            self.fail("LoadModel service failed")

    def test_tts(self):
        """
        This method tests if TTS audio is generated successfully
        """
        try:
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen"))
                self.assertTrue(response.success)
                tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
                tts_response = stub.TTS(tts_request)
                self.assertIsNotNone(tts_response)
        except Exception as err:
            print(err)
            self.fail("TTS service failed")
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,12 @@ | ||
#!/bin/bash
set -e

# Quote the path so directories containing spaces don't break sourcing.
source "$(dirname "$0")/../common/libbackend.sh"

# Download the OpenVoice v2 checkpoints on first run; subsequent runs
# reuse the already-extracted checkpoints_v2 directory.
if [ ! -d "checkpoints_v2" ]; then
    wget https://myshell-public-repo-hosting.s3.amazonaws.com/openvoice/checkpoints_v2_0417.zip -O checkpoints_v2.zip
    unzip checkpoints_v2.zip
fi

runUnittests