Skip to content

Commit

Permalink
Add base_url_config args.
Browse files Browse the repository at this point in the history
  • Loading branch information
zh-plus committed May 7, 2024
1 parent afc5731 commit 5aaf455
Show file tree
Hide file tree
Showing 3 changed files with 25 additions and 12 deletions.
22 changes: 14 additions & 8 deletions openlrc/chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ def message(self, messages_list: Union[List[Dict], List[List[Dict]]],
@_register_chatbot
class GPTBot(ChatBot):
def __init__(self, model='gpt-3.5-turbo-0125', temperature=1, top_p=1, retry=8, max_async=16, json_mode=False,
fee_limit=0.05, proxy=None):
fee_limit=0.05, proxy=None, base_url_config=None):
# Pricing for 1M tokens, info from https://openai.com/pricing
pricing = {
'gpt-3.5-turbo-0125': (0.5, 1.5),
Expand All @@ -145,9 +145,11 @@ def __init__(self, model='gpt-3.5-turbo-0125', temperature=1, top_p=1, retry=8,

super().__init__(pricing, temperature, top_p, retry, max_async, fee_limit)

self.async_client = AsyncGPTClient(api_key=os.environ['OPENAI_API_KEY'], http_client=httpx.AsyncClient(
proxies=proxy,
))
self.async_client = AsyncGPTClient(
api_key=os.environ['OPENAI_API_KEY'],
http_client=httpx.AsyncClient(proxies=proxy),
base_url=base_url_config['openai'] if base_url_config else None
)

self.model = model
self.temperature = temperature
Expand Down Expand Up @@ -215,7 +217,7 @@ async def _create_achat(self, messages: List[Dict], output_checker: Callable = l
@_register_chatbot
class ClaudeBot(ChatBot):
def __init__(self, model='claude-3-sonnet-20240229', temperature=1, top_p=1, retry=8, max_async=16, fee_limit=0.2,
proxy=None):
proxy=None, base_url_config=None):
# Pricing for 1M tokens, info from https://docs.anthropic.com/claude/docs/models-overview#model-comparison
pricing = {
'claude-3-opus-20240229': (15, 75),
Expand All @@ -225,9 +227,13 @@ def __init__(self, model='claude-3-sonnet-20240229', temperature=1, top_p=1, ret

super().__init__(pricing, temperature, top_p, retry, max_async, fee_limit)

self.async_client = AsyncAnthropic(api_key=os.environ['ANTHROPIC_API_KEY'], http_client=httpx.AsyncClient(
proxies=proxy,
))
self.async_client = AsyncAnthropic(
api_key=os.environ['ANTHROPIC_API_KEY'],
http_client=httpx.AsyncClient(
proxies=proxy
),
base_url=base_url_config['anthropic'] if base_url_config else None
)

self.model = model
self.retry = retry
Expand Down
8 changes: 6 additions & 2 deletions openlrc/openlrc.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,17 +41,21 @@ class LRCer:
asr_options: Parameters for whisper model.
vad_options: Parameters for VAD model.
proxy: Proxy for OpenAI requests, e.g. 'http://127.0.0.1:7890'
base_url_config: Base URL dict for OpenAI & Anthropic.
e.g. {'openai': 'https://openai.justsong.cn/', 'anthropic': 'https://api.g4f.icu'}
Default: ``None``
"""

def __init__(self, whisper_model='large-v3', compute_type='float16', chatbot_model: str = 'gpt-3.5-turbo',
fee_limit=0.1, consumer_thread=4, asr_options=None, vad_options=None, preprocess_options=None,
proxy=None):
proxy=None, base_url_config=None):
self.chatbot_model = chatbot_model
self.fee_limit = fee_limit
self.api_fee = 0 # Can be updated in different thread, operation should be thread-safe
self.from_video = set()
self.context: Context = Context()
self.proxy = proxy
self.base_url_config = base_url_config

self._lock = Lock()
self.exception = None
Expand Down Expand Up @@ -180,7 +184,7 @@ def _translate(self, audio_name, prompter, target_lang, transcribed_opt_sub, tra
if not translated_path.exists():
# Translate the transcribed json
translator = LLMTranslator(chatbot_model=self.chatbot_model, prompter=prompter, fee_limit=self.fee_limit,
proxy=self.proxy)
proxy=self.proxy, base_url_config=self.base_url_config)
context = self.context

target_texts = translator.translate(
Expand Down
7 changes: 5 additions & 2 deletions openlrc/translate.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def translate(self, texts: Union[str, List[str]], src_lang, target_lang):

class LLMTranslator(Translator):
def __init__(self, chatbot_model: str = 'gpt-3.5-turbo', prompter: str = 'base_trans', fee_limit=0.1,
chunk_size=30, intercept_line=None, proxy=None):
chunk_size=30, intercept_line=None, proxy=None, base_url_config=None):
"""
Args:
chatbot_model: Chatbot instance. Choices can be found using `LLMTranslator().list_chatbots()`.
Expand All @@ -35,6 +35,8 @@ def __init__(self, chatbot_model: str = 'gpt-3.5-turbo', prompter: str = 'base_t
chunk_size (int): Use a small chunk size (<20) for speed (more asynchronous calls) and to enhance translation
stability (keeping audio timeline consistency).
intercept_line (int): Intercepted text line number.
proxy (str): Proxy server. e.g. http://127.0.0.1:7890
base_url_config (dict): Base URL configuration for the chatbot API, keyed by provider
    (e.g. 'openai', 'anthropic').
"""
if prompter not in prompter_map:
raise ValueError(f'Prompter {prompter} not found.')
Expand All @@ -43,7 +45,8 @@ def __init__(self, chatbot_model: str = 'gpt-3.5-turbo', prompter: str = 'base_t
raise ValueError(f'Chatbot {chatbot_model} not supported.')

chatbot_category = chatbot_map[model2chatbot[chatbot_model]]
self.chatbot = chatbot_category(model=chatbot_model, fee_limit=fee_limit, proxy=proxy, temperature=0.7)
self.chatbot = chatbot_category(model=chatbot_model, fee_limit=fee_limit, proxy=proxy, temperature=0.7,
base_url_config=base_url_config)

self.prompter = prompter
self.fee_limit = fee_limit
Expand Down

0 comments on commit 5aaf455

Please sign in to comment.