diff --git a/README.md b/README.md
index 95b7b411..29bc9172 100644
--- a/README.md
+++ b/README.md
@@ -113,7 +113,7 @@ Once you have the above docker setup and running, you can create agents and init
     "stream": true,
     "provider_config": {
       "voice": "Meera - high quality, emotive",
-      "model": "eleven_multilingual_v2",
+      "model": "eleven_turbo_v2_5",
       "voice_id": "TTa58Hl9lmhnQEvhp1WM"
     },
     "buffer_size": 100.0
diff --git a/bolna/synthesizer/elevenlabs_synthesizer.py b/bolna/synthesizer/elevenlabs_synthesizer.py
index 67622000..6814df15 100644
--- a/bolna/synthesizer/elevenlabs_synthesizer.py
+++ b/bolna/synthesizer/elevenlabs_synthesizer.py
@@ -18,14 +18,14 @@ class ElevenlabsSynthesizer(BaseSynthesizer):
-    def __init__(self, voice, voice_id, model="eleven_multilingual_v1", audio_format="mp3", sampling_rate="16000",
+    def __init__(self, voice, voice_id, model="eleven_turbo_v2_5", audio_format="mp3", sampling_rate="16000",
                  stream=False, buffer_size=400, temperature = 0.9, similarity_boost = 0.5, synthesier_key=None, caching=True, **kwargs):
         super().__init__(stream)
         self.api_key = os.environ["ELEVENLABS_API_KEY"] if synthesier_key is None else synthesier_key
         self.voice = voice_id
         self.use_turbo = kwargs.get("use_turbo", False)
-        self.model = "eleven_turbo_v2" if self.use_turbo else "eleven_multilingual_v2"
+        self.model = model
         logger.info(f"Using turbo or not {self.model}")
         self.stream = False  # Issue with elevenlabs streaming that we need to always send the text quickly
         self.websocket_connection = None
diff --git a/local_setup/README.md b/local_setup/README.md
index 14d95bf1..a512d8ae 100644
--- a/local_setup/README.md
+++ b/local_setup/README.md
@@ -67,7 +67,7 @@ Once you have the above docker setup and running, you can create agents and init
     "stream": true,
     "provider_config": {
       "voice": "Meera - high quality, emotive",
-      "model": "eleven_multilingual_v2",
+      "model": "eleven_turbo_v2_5",
       "voice_id": "TTa58Hl9lmhnQEvhp1WM"
     },
     "buffer_size": 100.0
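
The practical effect of the synthesizer change is that the `model` argument passed to `ElevenlabsSynthesizer.__init__` is now used as-is, instead of being overridden to `eleven_turbo_v2` or `eleven_multilingual_v2` based on the `use_turbo` flag. A minimal sketch of that behavior, assuming the class can be instantiated standalone with an `ELEVENLABS_API_KEY` in the environment; the key placeholder is hypothetical, and the voice values are taken from the README example above:

```python
import os

from bolna.synthesizer.elevenlabs_synthesizer import ElevenlabsSynthesizer

# Hypothetical placeholder key; the constructor reads ELEVENLABS_API_KEY
# from the environment when no synthesier_key argument is given.
os.environ.setdefault("ELEVENLABS_API_KEY", "<your-elevenlabs-api-key>")

# With the patched __init__, omitting `model` now falls back to the new
# default "eleven_turbo_v2_5" rather than being forced by `use_turbo`.
synth = ElevenlabsSynthesizer(
    voice="Meera - high quality, emotive",
    voice_id="TTa58Hl9lmhnQEvhp1WM",
    stream=True,
    buffer_size=100.0,
)
print(synth.model)  # expected: eleven_turbo_v2_5
```

Callers that previously relied on `use_turbo` to select the turbo model should instead pass the desired model explicitly via `provider_config["model"]`, as shown in the README diffs above.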