diff --git a/.gitignore b/.gitignore
index 1c05227d..86abceb6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,5 @@ venv
 env
 .env
 __pycache__
+certificate.*
+private.*
diff --git a/_version.py b/_version.py
index 32b71806..323cb0be 100644
--- a/_version.py
+++ b/_version.py
@@ -20,7 +20,7 @@
 from packaging import version
 
-__version__ = "5.2.10"
+__version__ = "5.4.2"
 
 
 def version_major() -> int:
diff --git a/bot_handler.py b/bot_handler.py
index 4610d122..881adcfe 100644
--- a/bot_handler.py
+++ b/bot_handler.py
@@ -64,6 +64,7 @@
 BOT_COMMAND_CHAT = "chat"
 BOT_COMMAND_MODULE = "module"
 BOT_COMMAND_STYLE = "style"
+BOT_COMMAND_MODEL = "model"
 BOT_COMMAND_CLEAR = "clear"
 BOT_COMMAND_LANG = "lang"
 BOT_COMMAND_CHAT_ID = "chatid"
@@ -121,6 +122,8 @@ def __init__(
         logging_queue: multiprocessing.Queue,
         queue_handler_: queue_handler.QueueHandler,
         modules: Dict,
+        web_cooldown_timer: multiprocessing.Value,
+        web_request_lock: multiprocessing.Lock,
     ):
         self.config = config
         self.config_file = config_file
@@ -130,6 +133,10 @@
         self.queue_handler = queue_handler_
         self.modules = modules
 
+        # LMAO web API shared cooldown timer and request lock
+        self.web_cooldown_timer = web_cooldown_timer
+        self.web_request_lock = web_request_lock
+
         self.prevent_shutdown_flag = multiprocessing.Value(c_bool, False)
 
         self._application = None
@@ -159,7 +166,17 @@ def start_bot(self):
 
         # Build bot
         telegram_config = self.config.get("telegram")
-        builder = ApplicationBuilder().token(telegram_config.get("api_key"))
+        proxy = telegram_config.get("proxy")
+        if proxy:
+            logging.info(f"Using proxy {proxy} for Telegram bot")
+            builder = (
+                ApplicationBuilder()
+                .token(telegram_config.get("api_key"))
+                .proxy(proxy)
+                .get_updates_proxy(proxy)
+            )
+        else:
+            builder = ApplicationBuilder().token(telegram_config.get("api_key"))
         self._application = builder.build()
 
         # Set commands
@@ -172,6 +189,7 @@
         self._application.add_handler(CaptionCommandHandler(BOT_COMMAND_CHAT, self.bot_module_request))
         self._application.add_handler(CaptionCommandHandler(BOT_COMMAND_MODULE, self.bot_command_module))
         self._application.add_handler(CaptionCommandHandler(BOT_COMMAND_STYLE, self.bot_command_style))
+        self._application.add_handler(CaptionCommandHandler(BOT_COMMAND_MODEL, self.bot_command_model))
         self._application.add_handler(CaptionCommandHandler(BOT_COMMAND_CLEAR, self.bot_command_clear))
         self._application.add_handler(CaptionCommandHandler(BOT_COMMAND_LANG, self.bot_command_lang))
         self._application.add_handler(CaptionCommandHandler(BOT_COMMAND_CHAT_ID, self.bot_command_chatid))
@@ -302,15 +320,13 @@ async def query_callback(self, update: Update, context: ContextTypes.DEFAULT_TYP
             return
 
         # Parse data from markup
-        action, data_, reply_message_id = data_.split("|")
+        action, data_, argument_ = data_.split("|")
         if not action:
             raise Exception("No action in callback data")
         if not data_:
             data_ = None
-        if not reply_message_id:
-            reply_message_id = None
-        else:
-            reply_message_id = int(reply_message_id.strip())
+        if not argument_:
+            argument_ = None
 
         # Get user
         banned, user = await self._user_get_check(update, context, prompt_language_selection=False)
@@ -329,6 +345,12 @@ async def query_callback(self, update: Update, context: ContextTypes.DEFAULT_TYP
 
         # Regenerate request
        if action == "regenerate":
+            # Parse message ID
+            if not argument_:
+                reply_message_id = None
+            else:
+                reply_message_id = int(argument_.strip())
+
             # Get last message ID
             reply_message_id_last = self.users_handler.get_key(0, "reply_message_id_last", user=user)
             if reply_message_id_last is None or reply_message_id_last != reply_message_id:
@@ -364,6 +386,12 @@ async def query_callback(self, update: Update, context: ContextTypes.DEFAULT_TYP
 
         # Continue generating
         elif action == "continue":
+            # Parse message ID
+            if not argument_:
+                reply_message_id = None
+            else:
+                reply_message_id = int(argument_.strip())
+
             # Get last message ID
             reply_message_id_last = self.users_handler.get_key(0, "reply_message_id_last", user=user)
             if reply_message_id_last is None or reply_message_id_last != reply_message_id:
@@ -385,6 +413,12 @@ async def query_callback(self, update: Update, context: ContextTypes.DEFAULT_TYP
 
         # Send suggestion
         elif action == "suggestion":
+            # Parse message ID
+            if not argument_:
+                reply_message_id = None
+            else:
+                reply_message_id = int(argument_.strip())
+
             # Get last message ID
             reply_message_id_last = self.users_handler.get_key(0, "reply_message_id_last", user=user)
             if reply_message_id_last is None or reply_message_id_last != reply_message_id:
@@ -421,6 +455,12 @@ async def query_callback(self, update: Update, context: ContextTypes.DEFAULT_TYP
 
         # Stop generating
         elif action == "stop":
+            # Parse message ID
+            if not argument_:
+                reply_message_id = None
+            else:
+                reply_message_id = int(argument_.strip())
+
             # Get last message ID
             reply_message_id_last = self.users_handler.get_key(0, "reply_message_id_last", user=user)
             if reply_message_id_last is None or reply_message_id_last != reply_message_id:
@@ -466,6 +506,10 @@ async def query_callback(self, update: Update, context: ContextTypes.DEFAULT_TYP
         elif action == "style":
             await self._bot_command_style_raw(data_, user, context)
 
+        # Change model
+        elif action == "model":
+            await self._bot_command_model_raw(data_, argument_, user, context)
+
         # Change language
         elif action == "lang":
             await self._bot_command_lang_raw(data_, user, context)
@@ -752,8 +796,20 @@ async def bot_command_restart(self, update: Update, context: ContextTypes.DEFAUL
                 continue
             logging.info(f"Trying to load and initialize {module_name} module")
             try:
+                use_web = (
+                    module_name.startswith("lmao_")
+                    and module_name in self.config.get("modules").get("lmao_web_for_modules", [])
+                    and "lmao_web_api_url" in self.config.get("modules")
+                )
                 module = module_wrapper_global.ModuleWrapperGlobal(
-                    module_name, self.config, self.messages, self.users_handler, self.logging_queue
+                    module_name,
+                    self.config,
+                    self.messages,
+                    self.users_handler,
+                    self.logging_queue,
+                    use_web=use_web,
+                    web_cooldown_timer=self.web_cooldown_timer,
+                    web_request_lock=self.web_request_lock,
                 )
                 self.modules[module_name] = module
                 reload_logs += f"Initialized and loaded {module_name} module\n"
@@ -1042,6 +1098,148 @@ async def _bot_command_style_raw(self, style: str or None, user: Dict, context:
             context,
         )
 
+    async def bot_command_model(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
+        """/model commands callback
+
+        Args:
+            update (Update): update object from bot's callback
+            context (ContextTypes.DEFAULT_TYPE): context object from bot's callback
+        """
+        # Get user
+        banned, user = await self._user_get_check(update, context)
+        if user is None:
+            return
+        user_id = user.get("user_id")
+        user_name = self.users_handler.get_key(0, "user_name", "", user=user)
+        lang_id = self.users_handler.get_key(0, "lang_id", user=user)
+
+        # Log command
+        logging.info(f"/model command from {user_name} ({user_id})")
+
+        # Exit if banned
+        if banned:
+            return
+
+        module_id = self.users_handler.get_key(0, "module", self.config.get("modules").get("default"), user=user)
+
+        model = None
+
+        # User specified model
+        if context.args and len(context.args) >= 1:
+            try:
+                model = context.args[0].strip().lower()
+
+                # Get available models
+                current_module_id = self.users_handler.get_key(
+                    0, "module", self.config.get("modules").get("default"), user=user
+                )
+                available_models = self.config.get(current_module_id).get("models", [])
+
+                # Get current model
+                model_current = self.config.get(module_id).get("model_default")
+                model_current = self.users_handler.get_key(0, f"{module_id}_model", model_current, user=user)
+
+                # Check
+                if not model_current or len(available_models) == 0:
+                    await _send_safe(
+                        user_id,
+                        self.messages.get_message("model_no_models", lang_id=lang_id),
+                        context,
+                    )
+                    return
+
+                # Check
+                if model not in available_models:
+                    raise Exception(f"No model {model} in {' '.join(available_models)}")
+            except Exception as e:
+                logging.error("Error retrieving requested model", exc_info=e)
+                await _send_safe(
+                    user["user_id"],
+                    self.messages.get_message("model_change_error", lang_id=lang_id).format(error_text=str(e)),
+                    context,
+                )
+                return
+
+        # Change model or ask the user
+        await self._bot_command_model_raw(module_id, model, user, context)
+
+    async def _bot_command_model_raw(
+        self, module_id: str or None, model: str or None, user: Dict, context: ContextTypes.DEFAULT_TYPE
+    ) -> None:
+        """Changes model of module
+
+        Args:
+            module_id (str or None): id of module to change model of
+            model (str or None): model name or None to ask user
+            user (Dict): user's data as dictionary
+            context (ContextTypes.DEFAULT_TYPE): context object from bot's callback
+        """
+        user_id = user.get("user_id")
+        lang_id = self.users_handler.get_key(0, "lang_id", user=user)
+
+        # Extract current user's module and model
+        module_icon_names = self.messages.get_message("modules", lang_id=lang_id)
+        if not module_id:
+            module_id = self.users_handler.get_key(0, "module", self.config.get("modules").get("default"), user=user)
+        current_module_name = module_icon_names.get(module_id).get("name")
+        current_module_icon = module_icon_names.get(module_id).get("icon")
+        current_module_name = f"{current_module_icon} {current_module_name}"
+
+        # Get available models
+        available_models = self.config.get(module_id).get("models", [])
+
+        # Get current model
+        model_current = self.config.get(module_id).get("model_default")
+        model_current = self.users_handler.get_key(0, f"{module_id}_model", model_current, user=user)
+
+        # Check
+        if not model_current or len(available_models) == 0:
+            await _send_safe(
+                user_id,
+                self.messages.get_message("model_no_models", lang_id=lang_id),
+                context,
+            )
+            return
+
+        # Ask user
+        if not model:
+            buttons = []
+            for model_ in available_models:
+                buttons.append(InlineKeyboardButton(model_, callback_data=f"model|{module_id}|{model_}"))
+
+            await _send_safe(
+                user_id,
+                self.messages.get_message("model_select", lang_id=lang_id).format(
+                    module_name=current_module_name, current_model=model_current
+                ),
+                context,
+                reply_markup=InlineKeyboardMarkup(bot_sender.build_menu(buttons)),
+            )
+            return
+
+        # Change model
+        try:
+            # Change model of user
+            self.users_handler.set_key(user_id, f"{module_id}_model", model)
+
+            # Send confirmation
+            await _send_safe(
+                user_id,
+                self.messages.get_message("model_changed", lang_id=lang_id).format(
+                    module_name=current_module_name, changed_model=model
+                ),
+                context,
+            )
+
+        # Error changing model
+        except Exception as e:
+            logging.error("Error changing model", exc_info=e)
+            await _send_safe(
+                user_id,
+                self.messages.get_message("model_change_error", lang_id=lang_id).format(error_text=str(e)),
+                context,
+            )
+
     ########################################
     # General (non-modules) commands below #
     ########################################
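A note on the callback-data scheme changed above: the third `|`-separated field, previously always a reply message ID, is now a generic `argument_` so the new `model` action can also carry a model name. A quick illustration of both uses (values are made up):

```python
# "action|data|argument" — the model button packs module ID and model name:
action, data_, argument_ = "model|groq|llama3-8b-8192".split("|")
# action    -> "model"            (branch taken in query_callback)
# data_     -> "groq"             (module ID)
# argument_ -> "llama3-8b-8192"   (model to switch to)

# For regenerate/continue/suggestion/stop the argument is still a message ID:
action, data_, argument_ = "regenerate||42".split("|")
reply_message_id = int(argument_.strip()) if argument_ else None  # -> 42
```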
diff --git a/bot_sender.py b/bot_sender.py
index 60d9046d..1b52b17d 100644
--- a/bot_sender.py
+++ b/bot_sender.py
@@ -226,6 +226,14 @@ def build_markup(
         )
         buttons.append(button_style)
 
+    # Add change model button
+    if request_response.module_name in module_wrapper_global.MODULES_WITH_MODELS:
+        button_model = InlineKeyboardButton(
+            messages_.get_message("button_model_change", user_id=user_id),
+            callback_data=f"model|{request_response.module_name}|",
+        )
+        buttons.append(button_model)
+
     # Add change module button for all modules
     button_module = InlineKeyboardButton(
         messages_.get_message("button_module", user_id=user_id),
diff --git a/config.json b/config.json
index 18856421..f7970108 100644
--- a/config.json
+++ b/config.json
@@ -1,20 +1,35 @@
 {
     "__comment01__": "Version of config file. Please don't change it",
-    "config_version": 6,
+    "config_version": 8,
 
     "__comment02__": "General config of enabled modules",
-    "__comment03__": "Available modules: lmao_chatgpt, lmao_ms_copilot, ms_copilot, ms_copilot_designer, gemini",
+    "__comment03__": "Available modules: lmao_chatgpt, lmao_ms_copilot, ms_copilot, ms_copilot_designer, gemini, groq",
     "__comment04__": "NOTE: ms_copilot and ms_copilot_designer modules are DEPRECATED and will be removed soon",
     "modules": {
         "__comment01__": "Enabled modules",
         "enabled": [
             "lmao_chatgpt",
             "lmao_ms_copilot",
-            "gemini"
+            "gemini",
+            "groq"
         ],
 
         "__comment02__": "Default (initial) module for handling user messages",
-        "default": "lmao_chatgpt"
+        "default": "lmao_chatgpt",
+
+        "__comment03__": "Specify modules for which to use web API instead of python package. ex: ['lmao_chatgpt']",
+        "lmao_web_for_modules": [],
+
+        "__comment04__": "LMAO web API URL (without trailing slash)",
+        "lmao_web_api_url": "http://localhost:1312/api",
+
+        "__comment05__": "If needed, specify LMAO API token (from the --tokens-manage argument)",
+        "__comment06__": "See for more info",
+        "lmao_token_manage": "",
+
+        "__comment07__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)",
+        "__comment08__": "Use http://username:password@ip:port format in case of proxy with authorization",
+        "lmao_web_proxy": ""
     },
 
     "__comment05__": "Paths to files and directories",
@@ -85,6 +100,10 @@
             {
                 "command": "module",
                 "description": "↕ Change module to chat with"
             },
+            {
+                "command": "model",
+                "description": "⚙️ Change model of module (ex. for 🔴 Groq module)"
+            },
             {
                 "command": "clear",
                 "description": "🧹 Clear chat history"
@@ -97,7 +116,11 @@
                 "command": "chatid",
                 "description": "🆔 Show your chat_id"
             }
-        ]
+        ],
+
+        "__comment15__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)",
+        "__comment16__": "Use http://username:password@ip:port format in case of proxy with authorization",
+        "proxy": ""
     },
 
     "__comment07__": "Save all requests and responses to the files",
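For reference, this is how the new `modules` keys above are consumed; the same `use_web` expression appears both in `main.py` and in the `/restart` handler (config values below are illustrative):

```python
modules_config = {
    "enabled": ["lmao_chatgpt", "gemini", "groq"],
    "lmao_web_for_modules": ["lmao_chatgpt"],  # route this module through the web API
    "lmao_web_api_url": "http://localhost:1312/api",
}

for module_name in modules_config["enabled"]:
    use_web = (
        module_name.startswith("lmao_")
        and module_name in modules_config.get("lmao_web_for_modules", [])
        and "lmao_web_api_url" in modules_config
    )
    print(module_name, "-> web API" if use_web else "-> python package")
# lmao_chatgpt -> web API; gemini and groq never match (they are not lmao_ modules)
```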
diff --git a/google_ai_module.py b/google_ai_module.py
index 02048a1d..675261c3 100644
--- a/google_ai_module.py
+++ b/google_ai_module.py
@@ -155,10 +155,12 @@ def process_request(self, request_response: RequestResponseContainer) -> None:
             module_config = self.config.get(_NAME)
 
             # Cool down
-            if time.time() - self._last_request_time.value <= module_config.get("cooldown_seconds"):
-                time_to_wait = module_config.get("cooldown_seconds") - (time.time() - self._last_request_time.value)
+            if time.time() - self._last_request_time.value <= module_config.get("user_cooldown_seconds"):
+                time_to_wait = module_config.get("user_cooldown_seconds") - (
+                    time.time() - self._last_request_time.value
+                )
                 logging.warning(f"Too frequent requests. Waiting {time_to_wait} seconds...")
-                time.sleep(self._last_request_time.value + module_config.get("cooldown_seconds") - time.time())
+                time.sleep(self._last_request_time.value + module_config.get("user_cooldown_seconds") - time.time())
             self._last_request_time.value = time.time()
 
             response = None
@@ -224,10 +226,6 @@ def process_request(self, request_response: RequestResponseContainer) -> None:
             # Save conversation ID
             self.users_handler.set_key(request_response.user_id, f"{_NAME}_conversation_id", conversation_id)
 
-        # Error
-        except Exception as e:
-            raise e
-
         finally:
             self.processing_flag.value = False
+""" + +import time +import uuid +import json +import os +import multiprocessing +import ctypes +import logging +from typing import Dict + +from groq import Groq +import httpx + +import messages +import users_handler +from async_helper import async_helper +from bot_sender import send_message_async +from request_response_container import RequestResponseContainer + +# Self name +_NAME = "groq" + + +class GroqModule: + def __init__( + self, + config: Dict, + messages_: messages.Messages, + users_handler_: users_handler.UsersHandler, + ) -> None: + """Initializes class variables (must be done in main process) + + Args: + config (Dict): global config + messages_ (messages.Messages): initialized messages handler + users_handler_ (users_handler.UsersHandler): initialized users handler + """ + self.config = config + self.messages = messages_ + self.users_handler = users_handler_ + + # All variables here must be multiprocessing + self.processing_flag = multiprocessing.Value(ctypes.c_bool, False) + self._last_request_time = multiprocessing.Value(ctypes.c_double, 0.0) + + # Don't use this variable outside the module's process + self._model = None + + def initialize(self) -> None: + """Initializes Groq module using official Groq API: + This method must be called from another process + + Raises: + Exception: initialization error + """ + # Internal variables for current process + self._model = None + try: + self.processing_flag.value = False + + # Get module's config + module_config = self.config.get(_NAME) + + # Use proxy + if module_config.get("proxy") and module_config.get("proxy") != "auto": + proxy = module_config.get("proxy") + logging.info(f"Initializing Groq module with proxy {proxy}") + self._model = Groq( + api_key=module_config.get("api_key"), + base_url=module_config.get("base_url"), + http_client=httpx.Client(proxies=proxy), + ) + else: + logging.info("Initializing Groq module without proxy") + self._model = Groq(api_key=module_config.get("api_key"), base_url=module_config.get("base_url")) + + logging.info("Groq module initialized") + + # Reset module and re-raise the error + except Exception as e: + self._model = None + raise e + + def process_request(self, request_response: RequestResponseContainer) -> None: + """Processes request to Groq + + Args: + request_response (RequestResponseContainer): container from the queue + + Raises: + Exception: in case of error + """ + conversations_dir = self.config.get("files").get("conversations_dir") + conversation_id = self.users_handler.get_key(request_response.user_id, f"{_NAME}_conversation_id") + model_name = self.users_handler.get_key( + request_response.user_id, f"{_NAME}_model", self.config.get(_NAME).get("model_default") + ) + + # Check if we are initialized + if self._model is None: + logging.error("Groq not initialized") + request_response.response_text = self.messages.get_message( + "response_error", user_id=request_response.user_id + ).format(error_text="Groq module not initialized") + request_response.error = True + self.processing_flag.value = False + return + + try: + # Set flag that we are currently processing request + self.processing_flag.value = True + + # Get module's config + module_config = self.config.get(_NAME) + + # Cool down + if time.time() - self._last_request_time.value <= module_config.get("user_cooldown_seconds"): + time_to_wait = module_config.get("user_cooldown_seconds") - ( + time.time() - self._last_request_time.value + ) + logging.warning(f"Too frequent requests. 
Waiting {time_to_wait} seconds...") + time.sleep(self._last_request_time.value + module_config.get("user_cooldown_seconds") - time.time()) + self._last_request_time.value = time.time() + + # Check model name (just in case) + if model_name not in self.config.get(_NAME).get("models"): + logging.warning(f"No model named {model_name}. Using default one") + model_name = self.config.get(_NAME).get("model_default") + + response = None + conversation = [] + + # Try to load conversation + conversation = _load_conversation(conversations_dir, conversation_id) or [] + # Generate new random conversation ID + if conversation_id is None: + conversation_id = f"{_NAME}_{uuid.uuid4()}" + + conversation.append({"role": "user", "content": request_response.request_text}) + + logging.info("Asking Groq...") + response = self._model.chat.completions.create(messages=conversation, model=model_name) + + request_response.response_text = response.choices[0].message.content + role = response.choices[0].message.role + + # Try to save conversation + conversation.append({"role": role, "content": request_response.response_text}) + if not _save_conversation(conversations_dir, conversation_id, conversation): + conversation_id = None + + # Save conversation ID + self.users_handler.set_key(request_response.user_id, f"{_NAME}_conversation_id", conversation_id) + + finally: + self.processing_flag.value = False + + # Finish + async_helper(send_message_async(self.config.get("telegram"), self.messages, request_response, end=True)) + + def clear_conversation_for_user(self, user_id: int) -> None: + """Clears conversation (chat history) for selected user""" + # Get current conversation_id + conversation_id = self.users_handler.get_key(user_id, f"{_NAME}_conversation_id") + if conversation_id is None: + return + + # Delete from API + _delete_conversation(self.config.get("files").get("conversations_dir"), conversation_id) + + # Delete from user + self.users_handler.set_key(user_id, f"{_NAME}_conversation_id", None) + + +def _load_conversation(conversations_dir, conversation_id): + """Tries to load conversation + + Args: + conversations_dir (_type_): _description_ + conversation_id (_type_): _description_ + + Returns: + _type_: content of conversation, None if error + """ + logging.info(f"Loading conversation {conversation_id}") + try: + if conversation_id is None: + logging.info("conversation_id is None. Skipping loading") + return None + + conversation_file = os.path.join(conversations_dir, conversation_id + ".json") + if os.path.exists(conversation_file): + # Load from json file + with open(conversation_file, "r", encoding="utf-8") as json_file: + return json.load(json_file) + else: + logging.warning(f"File {conversation_file} not exists") + + except Exception as e: + logging.warning(f"Error loading conversation {conversation_id}", exc_info=e) + + return None + + +def _save_conversation(conversations_dir, conversation_id, conversation) -> bool: + """Tries to save conversation without raising any error + + Args: + conversations_dir (_type_): _description_ + conversation_id (_type_): _description_ + conversation (_type_): _description_ + + Returns: + bool: True if no error + """ + logging.info(f"Saving conversation {conversation_id}") + try: + if conversation_id is None: + logging.info("conversation_id is None. 
Skipping saving") + return False + + # Create conversation dir + if not os.path.exists(conversations_dir): + logging.info(f"Creating {conversations_dir} directory") + os.makedirs(conversations_dir) + + # Save as json file + conversation_file = os.path.join(conversations_dir, conversation_id + ".json") + with open(conversation_file, "w+", encoding="utf-8") as json_file: + json.dump(conversation, json_file, indent=4, ensure_ascii=False) + + except Exception as e: + logging.error(f"Error saving conversation {conversation_id}", exc_info=e) + return False + + return True + + +def _delete_conversation(conversations_dir, conversation_id) -> bool: + """Tries to delete conversation without raising any error + + Args: + conversations_dir (_type_): _description_ + conversation_id (_type_): _description_ + + Returns: + bool: True if no error + """ + logging.info(f"Deleting conversation {conversation_id}") + # Delete conversation file if exists + try: + conversation_file = os.path.join(conversations_dir, conversation_id + ".json") + if os.path.exists(conversation_file): + logging.info(f"Deleting {conversation_file} file") + os.remove(conversation_file) + return True + + except Exception as e: + logging.error( + f"Error removing conversation file for conversation {conversation_id}", + exc_info=e, + ) + + return False diff --git a/langs/bel.json b/langs/bel.json index be3701f0..6e06a5d0 100644 --- a/langs/bel.json +++ b/langs/bel.json @@ -51,6 +51,11 @@ "style_precise": "📏 Дакладны", "style_balanced": "⚖️ Збалансаваны", "style_creative": "🎨 Творчы", + "model_changed": "✅ Мадэль з мадуля {module_name} змяніўся на {changed_model}", + "model_select": "Ваш мадэль {module_name} мадуля: {current_model}\nВыберыце новую мадэль {module_name} мадуля,\nабо ігнараваць гэтае паведамленне", + "model_change_error": "❌ Памылка пры змене мадэлі!\n\n{error_text}", + "model_no_models": "❌ Вы не можаце змяніць мадэль бягучага мадуля!", + "button_model_change": "⚙️ Змяніць мадэль", "button_stop_generating": "🛑 Спыніць генерацыю", "button_continue": "⏩ Працягнуць", "button_regenerate": "🔄 Згенераваць зноў", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/eng.json b/langs/eng.json index 781d6762..a0f0366c 100644 --- a/langs/eng.json +++ b/langs/eng.json @@ -51,6 +51,11 @@ "style_precise": "📏 Precise", "style_balanced": "⚖️ Balanced", "style_creative": "🎨 Creative", + "model_changed": "✅ Model from module {module_name} changed to {changed_model}", + "model_select": "Your model of {module_name} module: {current_model}\nSelect new model of {module_name} module,\nor ignore this message", + "model_change_error": "❌ Error changing model!\n\n{error_text}", + "model_no_models": "❌ You can't change model of current module!", + "button_model_change": "⚙️ Change model", "button_stop_generating": "🛑 Stop generating", "button_continue": "⏩ Continue", "button_regenerate": "🔄 Regenerate", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/fas.json b/langs/fas.json index 1071c126..7adc1ecc 100644 --- a/langs/fas.json +++ b/langs/fas.json @@ -51,6 +51,11 @@ "style_precise": "📏 دقیق", "style_balanced": "⚖️ متعادل", "style_creative": "🎨 خلاق", + "model_changed": "✅ مدل از ماژول {module_name} به {changed_model} تغییر کرد", + "model_select": "مدل فعلی شما از ماژول {module_name}: {current_model}\nمدل جدید ماژول {module_name} را انتخاب کنید،\nیا این پیام را نادیده بگیرید", + 
"model_change_error": "❌ خطا در تغییر مدل!\n\n{error_text}", + "model_no_models": "❌ شما نمی‌توانید مدل ماژول فعلی را تغییر دهید!", + "button_model_change": "⚙️ تغییر مدل", "button_stop_generating": "🛑 تولید را متوقف کنید", "button_continue": "⏩ ادامه", "button_regenerate": "🔄 بازسازی کنید", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/fra.json b/langs/fra.json index a0c6f3cc..ef162581 100644 --- a/langs/fra.json +++ b/langs/fra.json @@ -51,6 +51,11 @@ "style_precise": "📏 Précision", "style_balanced": "⚖️ Équilibré", "style_creative": "🎨 Créative", + "model_changed": "✅ Le modèle du module {module_name} a été changé en {changed_model}", + "model_select": "Votre modèle du module {module_name} : {current_model}\nSélectionnez le nouveau modèle du module {module_name},\nou ignorez ce message", + "model_change_error": "❌ Erreur lors du changement de modèle !\n\n{error_text}", + "model_no_models": "❌ Vous ne pouvez pas changer le modèle du module actuel !", + "button_model_change": "⚙️ Changer de modèle", "button_stop_generating": "🛑 Arrêter de générer", "button_continue": "⏩ Continuer", "button_regenerate": "🔄 Régénérer", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/ind.json b/langs/ind.json index 9a0001ae..58982cde 100644 --- a/langs/ind.json +++ b/langs/ind.json @@ -51,6 +51,11 @@ "style_precise": "📏 Presisi", "style_balanced": "⚖️ Seimbang", "style_creative": "🎨 Kreatif", + "model_changed": "✅ Model dari modul {module_name} berubah menjadi {changed_model}", + "model_select": "Model modul {module_name} Anda: {current_model}\nPilih model baru modul {module_name},\natau abaikan pesan ini", + "model_change_error": "❌ Kesalahan saat mengubah model!\n\n{error_text}", + "model_no_models": "❌ Anda tidak dapat mengubah model modul saat ini!", + "button_model_change": "⚙️ Ubah model", "button_stop_generating": "🛑 Hentikan penghasilan", "button_continue": "⏩ Lanjutkan", "button_regenerate": "🔄 Hasilkan ulang", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/rus.json b/langs/rus.json index 1fed47c0..48d347e1 100644 --- a/langs/rus.json +++ b/langs/rus.json @@ -51,6 +51,11 @@ "style_precise": "📏 Точный", "style_balanced": "⚖️ Сбалансированный", "style_creative": "🎨 Творческий", + "model_changed": "✅ Модель из модуля {module_name} изменена на {changed_model}", + "model_select": "Ваша модель модуля {module_name}: {current_model}\nВыберите новую модель модуля {module_name},\nили проигнорируйте это сообщение", + "model_change_error": "❌ Ошибка при изменении модели!\n\n{error_text}", + "model_no_models": "❌ Вы не можете изменить модель текущего модуля!", + "button_model_change": "⚙️ Изменить модель", "button_stop_generating": "🛑 Прекратить генерацию", "button_continue": "⏩ Продолжить", "button_regenerate": "🔄 Перегенерировать", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/spa.json b/langs/spa.json index 80a3bd8a..1d09fb4d 100644 --- a/langs/spa.json +++ b/langs/spa.json @@ -51,6 +51,11 @@ "style_precise": "📏 Preciso", "style_balanced": "⚖️ Equilibrado", "style_creative": "🎨 Creativo", + "model_changed": "✅ El modelo del módulo {module_name} se cambió a {changed_model}", + "model_select": "Su modelo del módulo {module_name}: {current_model}\nSeleccione el 
nuevo modelo del módulo {module_name},\no ignore este mensaje", + "model_change_error": "❌ Error al cambiar el modelo.\n\n{error_text}", + "model_no_models": "❌ No puede cambiar el modelo del módulo actual.", + "button_model_change": "⚙️ Cambiar modelo", "button_stop_generating": "🛑 Detener la generación", "button_continue": "⏩ Continuar", "button_regenerate": "🔄 Regenerar", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/tof.json b/langs/tof.json index 038df2dc..02e62084 100644 --- a/langs/tof.json +++ b/langs/tof.json @@ -51,6 +51,11 @@ "style_precise": "📏 Точновый", "style_balanced": "⚖️ Сбалансированновый", "style_creative": "🎨 Творческовый", + "model_changed": "✅ Модельб из момдуля {module_name} изминиа на {changed_model}", + "model_select": "Вамша модельб момдуля {module_name}: {current_model}\nВыберитеп нововую модельб момдуля {module_name},\nили проигнорировайти енто сообщенне", + "model_change_error": "❌ Ошипка при изминенни момдели!\n\n{error_text}", + "model_no_models": "❌ Вы ни можоете изменитьб момдель текумщевого модуля!", + "button_model_change": "⚙️ Изменитб момделб", "button_stop_generating": "🛑 Прекратитб дегенерабцию", "button_continue": "⏩ Продолжитб", "button_regenerate": "🔄 Дегенерироватб", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Близнетсы" + }, + "groq": { + "icon": "🔴", + "name": "Грокб" } } } diff --git a/langs/ukr.json b/langs/ukr.json index 74199700..88f4c0f4 100644 --- a/langs/ukr.json +++ b/langs/ukr.json @@ -51,6 +51,11 @@ "style_precise": "📏 Точний", "style_balanced": "⚖️ Збалансований", "style_creative": "🎨 Творчий", + "model_changed": "✅ Модель з модуля {module_name} змінився на {changed_model}", + "model_select": "Ваша модель модуля {module_name}: {current_model}\nВиберіть нову модель модуля {module_name},\nабо проігноруйте це повідомлення", + "model_change_error": "❌ Помилка при зміні моделі!\n\n{error_text}", + "model_no_models": "❌ Ви не можете змінити модель поточного модуля!", + "button_model_change": "⚙️ Змінити модель", "button_stop_generating": "🛑 Припинити генерацію", "button_continue": "⏩ Продовжити", "button_regenerate": "🔄 Перегенерувати", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/vie.json b/langs/vie.json index 798a5d30..aa8b59c7 100644 --- a/langs/vie.json +++ b/langs/vie.json @@ -51,6 +51,11 @@ "style_precise": "📏 Chính xác", "style_balanced": "⚖️ Cân đối", "style_creative": "🎨 Sáng tạo", + "model_changed": "✅ Mô hình từ mô-đun {module_name} đã thay đổi thành {changed_model}", + "model_select": "Mô hình hiện tại của bạn từ mô-đun {module_name}: {current_model}\nChọn mô hình mới từ mô-đun {module_name},\nhoặc bỏ qua thông báo này", + "model_change_error": "❌ Lỗi khi thay đổi mô hình!\n\n{error_text}", + "model_no_models": "❌ Bạn không thể thay đổi mô hình của mô-đun hiện tại!", + "button_model_change": "⚙️ Thay đổi mô hình", "button_stop_generating": "🛑 Dừng tạo ra", "button_continue": "⏩ Tiếp tục", "button_regenerate": "🔄 Tạo lại", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/langs/zho.json b/langs/zho.json index 5fb0da30..81dd6a4f 100644 --- a/langs/zho.json +++ b/langs/zho.json @@ -51,6 +51,11 @@ "style_precise": "📏 精确", "style_balanced": "⚖️ 平衡", "style_creative": "🎨 创意", + "model_changed": "✅ 模块 {module_name} 的模型已更改为 {changed_model}", + "model_select": "您当前的 
{module_name} 模块模型:{current_model}\n选择 {module_name} 模块的新模型,\n或忽略此消息", + "model_change_error": "❌ 更改模型时出错!\n\n{error_text}", + "model_no_models": "❌ 您无法更改当前模块的模型!", + "button_model_change": "⚙️ 更改模型", "button_stop_generating": "🛑 停止生成", "button_continue": "⏩ 继续", "button_regenerate": "🔄 重新生成", @@ -85,6 +90,10 @@ "gemini": { "icon": "♊", "name": "Gemini" + }, + "groq": { + "icon": "🔴", + "name": "Groq" } } } diff --git a/lmao_process_loop.py b/lmao_process_loop.py index c3fd1870..f64d480a 100644 --- a/lmao_process_loop.py +++ b/lmao_process_loop.py @@ -59,6 +59,7 @@ def lmao_process_loop( lmao_request_queue: multiprocessing.Queue, lmao_response_queue: multiprocessing.Queue, lmao_exceptions_queue: multiprocessing.Queue, + *args, ) -> None: """Handler for lmao's ModuleWrapper (see module_wrapper_global.py for more info) @@ -145,6 +146,8 @@ def _lmao_stop_stream_loop() -> None: logging.warning(f"Exit from {name} loop requested") break + request_response = None + try: # Wait a bit to prevent overloading # We need to wait at the beginning to enable delay even after exception @@ -257,11 +260,9 @@ def _lmao_stop_stream_loop() -> None: break # Save conversation ID + logging.info(f"Saving user {request_response.user_id} conversation ID as: name_{conversation_id}") users_handler_.set_key(request_response.user_id, name + "_conversation_id", conversation_id) - # Return container - lmao_response_queue.put(request_response) - # Non-blocking get of user_id to clear conversation for delete_conversation_user_id = None try: @@ -296,10 +297,12 @@ def _lmao_stop_stream_loop() -> None: logging.error(f"{name} error", exc_info=e) lmao_exceptions_queue.put(e) - # Read module's status + # Read module's status and return the container finally: with lmao_module_status.get_lock(): lmao_module_status.value = module.status + if request_response: + lmao_response_queue.put(request_response) # Wait for stop handler to finish if stop_handler_thread and stop_handler_thread.is_alive(): diff --git a/lmao_process_loop_web.py b/lmao_process_loop_web.py new file mode 100644 index 00000000..9232c969 --- /dev/null +++ b/lmao_process_loop_web.py @@ -0,0 +1,649 @@ +""" +Copyright (C) 2023-2024 Fern Lane + +This file is part of the GPT-Telegramus distribution +(see ) + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as +published by the Free Software Foundation, either version 3 of the +License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
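The web loop added below serializes all LMAO API traffic through one `multiprocessing.Lock` and throttles it with one shared `c_double` timestamp (the `web_cooldown_timer` / `web_request_lock` pair created in `main.py`). A minimal standalone sketch of that pattern, with illustrative names:

```python
import multiprocessing
import time

REQUEST_COOLDOWN = 1.5  # same constant the web loop uses

cooldown_timer = multiprocessing.Value("d", 0.0)  # last request timestamp
request_lock = multiprocessing.Lock()


def throttled_request(worker_id: int) -> None:
    with request_lock:  # only one process talks to the API at a time
        with cooldown_timer.get_lock():
            last = cooldown_timer.value
        while time.time() - last < REQUEST_COOLDOWN:  # wait out the cooldown
            time.sleep(0.1)
        with cooldown_timer.get_lock():
            cooldown_timer.value = time.time()
        print(f"worker {worker_id} sent request at {time.time():.2f}")


if __name__ == "__main__":
    workers = [multiprocessing.Process(target=throttled_request, args=(i,)) for i in range(3)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
```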
+""" + +import base64 +import json +import logging +import multiprocessing +import queue +import random +import string +import threading +import time +from typing import Dict + +import requests +from lmao.module_wrapper import ( + STATUS_NOT_INITIALIZED, + STATUS_INITIALIZING, + STATUS_IDLE, + STATUS_BUSY, + STATUS_FAILED, +) + +import logging_handler +import messages +import users_handler +from bot_sender import send_message_async +from async_helper import async_helper + +# Timeout of all requests +REQUEST_TIMEOUT = 30 + +# Minimum allowed seconds between requests to prevent overloading API +REQUEST_COOLDOWN = 1.5 + +# How long to wait for module to initialize +INIT_TIMEOUT = 30 + +# How long to wait for module to become available +IDLE_TIMEOUT = 60 + +# Delay between status checks +CHECK_STATUS_INTERVAL = 10 + +# lmao process loop delay during idle +LMAO_LOOP_DELAY = 0.5 + + +def _request_wrapper( + api_url: str, + endpoint: str, + payload: Dict, + cooldown_timer: Dict, + request_lock: multiprocessing.Lock, + proxy: str or None = None, + token: str or None = None, + stream: bool = False, +) -> Dict or requests.Response: + """Wrapper for POST requests to LMAO API + + Args: + api_url (str): API base url + endpoint (str): ex. "status" or "init" + payload (Dict): request payload as JSON + cooldown_timer (multiprocessing.Value): double value to prevent "Too Many Requests" + request_lock (multiprocessing.Lock): lock to prevent multiple process from sending multiple requests at ones + proxy (str or None, optional): connect with proxy. Defaults to None + token (str or None, optional): token-based authorization. Defaults to None + stream (boor, optional). True for "ask" endpoint. Defaults to False + + Raises: + Exception: _description_ + Exception: _description_ + + Returns: + Dict or requests.Response: parsed JSON response or response object if stream is True + """ + # Add proxies if needed + proxies = None + if proxy: + proxies = { + "http": proxy, + "https": proxy, + } + + # Add token-based authorization if needed + if token: + payload["token"] = token + + # Set lock + request_lock.acquire() + + try: + + # Wait if needed + with cooldown_timer.get_lock(): + cooldown_timer_ = cooldown_timer.value + while time.time() - cooldown_timer_ < REQUEST_COOLDOWN: + time.sleep(0.1) + + with cooldown_timer.get_lock(): + cooldown_timer.value = time.time() + + # Send request + response_ = requests.post( + f"{api_url}/{endpoint}", json=payload, proxies=proxies, timeout=REQUEST_TIMEOUT, stream=stream + ) + + # Check status code + if response_.status_code != 200: + try: + error = response_.json().get("error") + except: + error = None + if error is not None: + raise Exception(str(error)) + else: + raise Exception(f"Status code: {response_.status_code}") + + # Return response itself if in stream mode + if stream: + return response_ + + # Return parsed json otherwise + return response_.json() + + # Release lock + finally: + if not stream: + request_lock.release() + + +def _check_status( + status_dict: Dict, + api_url: str, + cooldown_timer: Dict, + request_lock: multiprocessing.Lock, + proxy: str or None = None, + token: str or None = None, +) -> None: + """Sends POST request to api_url/status and writes status codes into status_dict + + Args: + status_dict (Dict): each module status will be set there as "module_name": status as integer + api_url (str): API base url + cooldown_timer (multiprocessing.Value): double value to prevent "Too Many Requests" + request_lock (multiprocessing.Lock): lock to prevent multiple 
process from sending multiple requests at ones + proxy (str or None, optional): connect with proxy. Defaults to None + token (str or None, optional): token-based authorization. Defaults to None + """ + if api_url.endswith("/"): + api_url = api_url[:-1] + try: + modules = _request_wrapper(api_url, "status", {}, cooldown_timer, request_lock, proxy, token) + for module in modules: + module_name = module.get("module") + module_status = module.get("status_code") + if module_name and module_status is not None and isinstance(module_status, int): + status_dict[module_name] = module_status + except Exception as e: + status_dict["exception"] = e + logging.error("Error checking status", exc_info=e) + + +def _status_checking_loop( + status_dict: Dict, + lmao_module_name: str, + status_value: multiprocessing.Value, + api_url: str, + cooldown_timer: Dict, + request_lock: multiprocessing.Lock, + proxy: str or None = None, + token: str or None = None, +) -> None: + """Calls _check_status() every CHECK_STATUS_INTERVAL seconds and sets status_value to status_dict[lmao_module_name] + set status_dict["exit"] = True to stop this + + Args: + status_dict (Dict): each module status will be set there as "module_name": status as integer + lmao_module_name (str): name of module to get status of (ex. "chatgpt") + status_value (multiprocessing.Value): multiprocessing status for lmao_module_name + api_url (str): API base url + cooldown_timer (multiprocessing.Value): double value to prevent "Too Many Requests" + request_lock (multiprocessing.Lock): lock to prevent multiple process from sending multiple requests at ones + proxy (str or None, optional): connect with proxy. Defaults to None + token (str or None, optional): token-based authorization. Defaults to None + """ + logging.info("Status checker thread started") + while not status_dict.get("exit"): + _check_status(status_dict, api_url, cooldown_timer, request_lock, proxy, token) + status_code = status_dict.get(lmao_module_name) + with status_value.get_lock(): + if status_code is not None: + status_value.value = status_code + else: + status_value.value = STATUS_NOT_INITIALIZED + time.sleep(CHECK_STATUS_INTERVAL) + logging.info("Status checker thread finished") + + +def lmao_process_loop_web( + name: str, + name_lmao: str, + config: Dict, + messages_: messages.Messages, + users_handler_: users_handler.UsersHandler, + logging_queue: multiprocessing.Queue, + lmao_process_running: multiprocessing.Value, + lmao_stop_stream_value: multiprocessing.Value, + lmao_module_status: multiprocessing.Value, + lmao_delete_conversation_request_queue: multiprocessing.Queue, + lmao_delete_conversation_response_queue: multiprocessing.Queue, + lmao_request_queue: multiprocessing.Queue, + lmao_response_queue: multiprocessing.Queue, + lmao_exceptions_queue: multiprocessing.Queue, + cooldown_timer: multiprocessing.Value, + request_lock: multiprocessing.Lock, +) -> None: + """Handler for LMAO web API + (see for more info) + """ + # Setup logging for current process + logging_handler.worker_configurer(logging_queue) + logging.info("_lmao_process_loop_web started") + + # Parse some config + api_url = config.get("modules").get("lmao_web_api_url") + proxy = config.get("modules").get("lmao_web_proxy") + token = config.get("modules").get("lmao_token_manage") + + status_dict = {} + + # Initialize module + logging.info(f"Initializing {name}") + try: + # Claim status + with lmao_module_status.get_lock(): + lmao_module_status.value = STATUS_INITIALIZING + + # Read actual status + 
+def lmao_process_loop_web(
+    name: str,
+    name_lmao: str,
+    config: Dict,
+    messages_: messages.Messages,
+    users_handler_: users_handler.UsersHandler,
+    logging_queue: multiprocessing.Queue,
+    lmao_process_running: multiprocessing.Value,
+    lmao_stop_stream_value: multiprocessing.Value,
+    lmao_module_status: multiprocessing.Value,
+    lmao_delete_conversation_request_queue: multiprocessing.Queue,
+    lmao_delete_conversation_response_queue: multiprocessing.Queue,
+    lmao_request_queue: multiprocessing.Queue,
+    lmao_response_queue: multiprocessing.Queue,
+    lmao_exceptions_queue: multiprocessing.Queue,
+    cooldown_timer: multiprocessing.Value,
+    request_lock: multiprocessing.Lock,
+) -> None:
+    """Handler for LMAO web API
+    (see for more info)
+    """
+    # Setup logging for current process
+    logging_handler.worker_configurer(logging_queue)
+    logging.info("_lmao_process_loop_web started")
+
+    # Parse some config
+    api_url = config.get("modules").get("lmao_web_api_url")
+    proxy = config.get("modules").get("lmao_web_proxy")
+    token = config.get("modules").get("lmao_token_manage")
+
+    status_dict = {}
+
+    # Initialize module
+    logging.info(f"Initializing {name}")
+    try:
+        # Claim status
+        with lmao_module_status.get_lock():
+            lmao_module_status.value = STATUS_INITIALIZING
+
+        # Read actual status
+        _check_status(status_dict, api_url, cooldown_timer, request_lock, proxy, token)
+        exception_ = status_dict.get("exception")
+        if exception_ is not None:
+            raise exception_
+        status_code = status_dict.get(name_lmao)
+
+        # Initialize module
+        if status_code is None or status_code == STATUS_NOT_INITIALIZED:
+            # Send initialization request
+            logging.info(f"Requesting {name_lmao} module initialization")
+            _request_wrapper(api_url, "init", {"module": name_lmao}, cooldown_timer, request_lock, proxy, token)
+
+            # Wait
+            logging.info(f"Waiting for {name_lmao} module to initialize")
+            time_started = time.time()
+            while True:
+                if time.time() - time_started > INIT_TIMEOUT:
+                    raise Exception(f"Timeout waiting for {name_lmao} module to initialize")
+                _check_status(status_dict, api_url, cooldown_timer, request_lock, proxy, token)
+                exception_ = status_dict.get("exception")
+                if exception_ is not None:
+                    raise exception_
+                status_code = status_dict.get(name_lmao)
+                if status_code == STATUS_FAILED:
+                    raise Exception(f"Unable to initialize module {name_lmao}. See LMAO logs for more info")
+                elif status_code == STATUS_IDLE or status_code == STATUS_BUSY:
+                    break
+                time.sleep(1)
+        logging.info(f"{name} initialized")
+        with lmao_module_status.get_lock():
+            lmao_module_status.value = status_code
+    except Exception as e:
+        logging.error(f"{name} initialization error", exc_info=e)
+        with lmao_module_status.get_lock():
+            lmao_module_status.value = STATUS_FAILED
+        with lmao_process_running.get_lock():
+            lmao_process_running.value = False
+        return
+
+    # Start status checker as thread
+    logging.info("Starting status checker thread")
+    status_checking_thread = threading.Thread(
+        target=_status_checking_loop,
+        args=(status_dict, name_lmao, lmao_module_status, api_url, cooldown_timer, request_lock, proxy, token),
+    )
+    status_checking_thread.start()
+
+    # Main loop container
+    request_response = None
+
+    def _lmao_stop_stream_loop() -> None:
+        """Background thread that handles stream stop signal"""
+        logging.info("_lmao_stop_stream_loop started")
+        while True:
+            # Exit from loop
+            with lmao_process_running.get_lock():
+                if not lmao_process_running.value:
+                    logging.warning("Exit from _lmao_stop_stream_loop requested")
+                    break
+
+            # Wait a bit to prevent overloading
+            # We need to wait at the beginning to enable delay even after exception and continue statement
+            time.sleep(LMAO_LOOP_DELAY)
+
+            # Get stop request
+            lmao_stop_stream = False
+            with lmao_stop_stream_value.get_lock():
+                if lmao_stop_stream_value.value:
+                    lmao_stop_stream = True
+                    lmao_stop_stream_value.value = False
+
+            if not lmao_stop_stream:
+                continue
+
+            # Send stop request
+            try:
+                logging.info(f"Requesting {name_lmao} module stop response")
+                _request_wrapper(api_url, "stop", {"module": name_lmao}, cooldown_timer, request_lock, proxy, token)
+
+            # Catch process interrupts just in case
+            except (SystemExit, KeyboardInterrupt):
+                logging.warning("Exit from _lmao_stop_stream_loop requested")
+                break
+
+            # Stop loop error
+            except Exception as e:
+                logging.error("_lmao_stop_stream_loop error", exc_info=e)
+
+            # Read module's status
+            finally:
+                try:
+                    _check_status(status_dict, api_url, cooldown_timer, request_lock, proxy, token)
+                    exception_ = status_dict.get("exception")
+                    if exception_ is not None:
+                        raise exception_
+                    status_code = status_dict.get(name_lmao, STATUS_NOT_INITIALIZED)
+                    with lmao_module_status.get_lock():
+                        lmao_module_status.value = status_code
+                except Exception as e:
+                    logging.warning(f"Unable to read {name_lmao} module status: {e}")
+
+        # Done
+        logging.info("_lmao_stop_stream_loop finished")
+
+    # Start stream stop signal handler
+    stop_handler_thread = threading.Thread(target=_lmao_stop_stream_loop)
+    stop_handler_thread.start()
+
+    # Main loop
+    while True:
+        # Exit from loop
+        with lmao_process_running.get_lock():
+            lmao_process_running_value = lmao_process_running.value
+        if not lmao_process_running_value:
+            logging.warning(f"Exit from {name} loop requested")
+            break
+
+        release_lock = False
+        request_response = None
+
+        try:
+            # Wait a bit to prevent overloading
+            # We need to wait at the beginning to enable delay even after exception
+            # But inside try-except to catch interrupts
+            time.sleep(LMAO_LOOP_DELAY)
+
+            # Non-blocking get of request-response container
+            request_response = None
+            try:
+                request_response = lmao_request_queue.get(block=False)
+            except queue.Empty:
+                pass
+
+            # New request
+            if request_response:
+                logging.info(f"Received new request to {name}")
+                with lmao_module_status.get_lock():
+                    lmao_module_status.value = STATUS_BUSY
+
+                # Wait for module to become available
+                logging.info(f"Waiting for IDLE status from {name_lmao} module")
+                time_started = time.time()
+                while True:
+                    if time.time() - time_started > IDLE_TIMEOUT:
+                        raise Exception(f"Timeout waiting for IDLE status from {name_lmao} module")
+                    status_code = status_dict.get(name_lmao)
+                    if status_code == STATUS_FAILED:
+                        raise Exception(f"Error waiting for IDLE status from {name_lmao}. See LMAO logs for more info")
+                    elif status_code == STATUS_IDLE:
+                        break
+                    time.sleep(1)
+                logging.info(f"Received IDLE status from {name} module")
+                with lmao_module_status.get_lock():
+                    lmao_module_status.value = status_code
+
+                # Extract request
+                prompt_text = request_response.request_text
+                prompt_image = request_response.request_image
+
+                # Check prompt
+                if not prompt_text:
+                    raise Exception("No text request")
+                else:
+                    # Extract conversation ID
+                    conversation_id = users_handler_.get_key(request_response.user_id, name + "_conversation_id")
+
+                    module_request = {"prompt": prompt_text, "convert_to_markdown": True}
+
+                    # Extract style (for lmao_ms_copilot only)
+                    if name == "lmao_ms_copilot":
+                        style = users_handler_.get_key(request_response.user_id, "ms_copilot_style", "balanced")
+                        module_request["style"] = style
+
+                    # Add image and conversation ID
+                    if prompt_image is not None:
+                        module_request["image"] = base64.b64encode(prompt_image).decode("utf-8")
+                    if conversation_id:
+                        module_request["conversation_id"] = conversation_id
+
+                    # Reset suggestions
+                    users_handler_.set_key(request_response.user_id, "suggestions", [])
+
+                    # Ask and read stream
+                    release_lock = True
+                    for line in _request_wrapper(
+                        api_url,
+                        "ask",
+                        {name_lmao: module_request},
+                        cooldown_timer,
+                        request_lock,
+                        proxy,
+                        token,
+                        stream=True,
+                    ).iter_lines():
+                        if not line:
+                            continue
+                        try:
+                            response = json.loads(line.decode("utf-8"))
+                        except Exception as e:
+                            logging.warning(f"Unable to parse response line as JSON: {e}")
+                            continue
+
+                        finished = response.get("finished")
+                        conversation_id = response.get("conversation_id")
+                        request_response.response_text = response.get("response")
+
+                        images = response.get("images")
+                        if images is not None:
+                            request_response.response_images = images[:]
+
+                        # Format and add attributions
+                        attributions = response.get("attributions")
+                        if attributions is not None and len(attributions) != 0:
+                            response_link_format = messages_.get_message(
+                                "response_link_format", user_id=request_response.user_id
+                            )
+                            request_response.response_text += "\n"
+                            for i, attribution in enumerate(attributions):
+                                request_response.response_text += response_link_format.format(
+                                    source_name=str(i + 1), link=attribution.get("url", "")
+                                )
+
+                        # Suggestions must be stored as tuples with unique ID for reply-markup
+                        if finished:
+                            suggestions = response.get("suggestions")
+                            if suggestions is not None:
+                                request_response.response_suggestions = []
+                                for suggestion in suggestions:
+                                    if not suggestion or len(suggestion) < 1:
+                                        continue
+                                    id_ = "".join(
+                                        random.choices(
+                                            string.ascii_uppercase + string.ascii_lowercase + string.digits, k=8
+                                        )
+                                    )
+                                    request_response.response_suggestions.append((id_, suggestion))
+                                users_handler_.set_key(
+                                    request_response.user_id,
+                                    "suggestions",
+                                    request_response.response_suggestions,
+                                )
+
+                        # Check if exit was requested
+                        with lmao_process_running.get_lock():
+                            lmao_process_running_value = lmao_process_running.value
+                        if not lmao_process_running_value:
+                            finished = True
+
+                        # Send response to the user
+                        async_helper(
+                            send_message_async(config.get("telegram"), messages_, request_response, end=finished)
+                        )
+
+                        # Exit from stream reader
+                        if not lmao_process_running_value:
+                            break
+
+                    # Save conversation ID
+                    logging.info(f"Saving user {request_response.user_id} conversation ID as: {name}_{conversation_id}")
+                    users_handler_.set_key(request_response.user_id, name + "_conversation_id", conversation_id)
See LMAO logs for more info") + elif status_code == STATUS_IDLE: + break + time.sleep(1) + logging.info(f"Received IDLE status from {name} module") + with lmao_module_status.get_lock(): + lmao_module_status.value = status_code + + conversation_id = users_handler_.get_key(delete_conversation_user_id, name + "_conversation_id") + + try: + if conversation_id: + payload = {name_lmao: {"conversation_id": conversation_id}} + _request_wrapper(api_url, "delete", payload, cooldown_timer, request_lock, proxy, token) + users_handler_.set_key(delete_conversation_user_id, name + "_conversation_id", None) + lmao_delete_conversation_response_queue.put(delete_conversation_user_id) + except Exception as e: + logging.error(f"Error deleting conversation for {name}", exc_info=e) + lmao_delete_conversation_response_queue.put(e) + finally: + try: + _check_status(status_dict, api_url, cooldown_timer, request_lock, proxy, token) + exception_ = status_dict.get("exception") + if exception_ is not None: + raise exception_ + status_code = status_dict.get(name_lmao, STATUS_NOT_INITIALIZED) + with lmao_module_status.get_lock(): + lmao_module_status.value = status_code + except Exception as e: + lmao_delete_conversation_response_queue.put(e) + + # Catch process interrupts just in case + except (SystemExit, KeyboardInterrupt): + logging.warning(f"Exit from {name} loop requested") + break + + # Main loop error + except Exception as e: + logging.error(f"{name} error", exc_info=e) + lmao_exceptions_queue.put(e) + + # Release lock if needed and return the container + finally: + if release_lock: + request_lock.release() + release_lock = False + if request_response: + lmao_response_queue.put(request_response) + + # Read module status + try: + _check_status(status_dict, api_url, cooldown_timer, request_lock, proxy, token) + exception_ = status_dict.get("exception") + if exception_ is not None: + raise exception_ + status_code = status_dict.get(name_lmao, STATUS_NOT_INITIALIZED) + with lmao_module_status.get_lock(): + lmao_module_status.value = status_code + except Exception as e: + logging.warning(f"Unable to read {name_lmao} module status: {e}") + + # Wait for stop handler to finish + if stop_handler_thread and stop_handler_thread.is_alive(): + logging.info("Waiting for _lmao_stop_stream_loop") + try: + stop_handler_thread.join() + except Exception as e: + logging.warning(f"Error joining _lmao_stop_stream_loop: {e}") + + # Try to close module + logging.info(f"Trying to close {name}") + try: + # Request module close + _request_wrapper(api_url, "delete", {"module": name_lmao}, cooldown_timer, request_lock, proxy, token) + + # Wait + logging.info(f"Waiting for {name_lmao} module to close") + time_started = time.time() + while True: + if time.time() - time_started > INIT_TIMEOUT: + raise Exception(f"Timeout waiting for {name_lmao} module to close") + _check_status(status_dict, api_url, cooldown_timer, request_lock, proxy, token) + exception_ = status_dict.get("exception") + if exception_ is not None: + raise exception_ + status_code = status_dict.get(name_lmao) + if status_code == STATUS_FAILED: + raise Exception(f"Unable to close module {name_lmao}. 
See LMAO logs for more info") + elif status_code == STATUS_IDLE or status_code == STATUS_BUSY: + break + time.sleep(1) + logging.info(f"{name} initialized") + with lmao_module_status.get_lock(): + lmao_module_status.value = status_code + except Exception as e: + logging.error(f"Error closing {name}", exc_info=e) + + # Stop status checker + if status_checking_thread is not None and status_checking_thread.is_alive: + logging.info("Stopping status checking thread") + status_dict["exit"] = True + logging.info("Trying to join status checking thread") + try: + status_checking_thread.join() + except Exception as e: + logging.warning(f"Unable to join status checker thread: {e}") + + # Done + with lmao_process_running.get_lock(): + lmao_process_running.value = False + logging.info("_lmao_process_loop_web finished") diff --git a/main.py b/main.py index ff24c708..061dd3b5 100644 --- a/main.py +++ b/main.py @@ -19,6 +19,7 @@ """ import argparse +from ctypes import c_double import json import logging import multiprocessing @@ -26,7 +27,6 @@ import sys from typing import Dict - from _version import __version__ import logging_handler import messages @@ -37,7 +37,7 @@ # Default config file CONFIG_FILE = "config.json" -CONFIG_COMPATIBLE_VERSIONS = [5, 6] +CONFIG_COMPATIBLE_VERSIONS = [5, 6, 7, 8] def load_and_parse_config(config_file: str) -> Dict: @@ -66,6 +66,8 @@ def load_and_parse_config(config_file: str) -> Dict: f"Your config version ({config_version}) is not compatible! " f"Compatible versions: {', '.join(str(version) for version in CONFIG_COMPATIBLE_VERSIONS)}" ) + if config_version < max(CONFIG_COMPATIBLE_VERSIONS): + logging.warning(f"You config version {config_version} < {max(CONFIG_COMPATIBLE_VERSIONS)}! Please update it") # List of enabled modules enabled_modules = config.get("modules").get("enabled") @@ -156,6 +158,9 @@ def main(): # Load messages messages_.langs_load(config.get("files").get("messages_dir")) + web_cooldown_timer = multiprocessing.Value(c_double, 0.0) + web_request_lock = multiprocessing.Lock() + # modules = {} is a dictionary of ModuleWrapperGlobal (each enabled module) # { # "module_name": ModuleWrapperGlobal, @@ -163,9 +168,21 @@ def main(): # } for module_name in config.get("modules").get("enabled"): logging.info(f"Trying to load and initialize {module_name} module") + use_web = ( + module_name.startswith("lmao_") + and module_name in config.get("modules").get("lmao_web_for_modules", []) + and "lmao_web_api_url" in config.get("modules") + ) try: module = module_wrapper_global.ModuleWrapperGlobal( - module_name, config, messages_, users_handler_, logging_handler_.queue + module_name, + config, + messages_, + users_handler_, + logging_handler_.queue, + use_web, + web_cooldown_timer=web_cooldown_timer, + web_request_lock=web_request_lock, ) modules[module_name] = module except Exception as e: @@ -176,7 +193,15 @@ def main(): config, messages_, users_handler_, logging_handler_.queue, None, modules ) bot_handler_ = bot_handler.BotHandler( - config, args.config, messages_, users_handler_, logging_handler_.queue, queue_handler_, modules + config, + args.config, + messages_, + users_handler_, + logging_handler_.queue, + queue_handler_, + modules, + web_cooldown_timer, + web_request_lock, ) queue_handler_.prevent_shutdown_flag = bot_handler_.prevent_shutdown_flag diff --git a/messages.py b/messages.py index ce1bf46b..d8427628 100644 --- a/messages.py +++ b/messages.py @@ -80,6 +80,11 @@ "style_precise", "style_balanced", "style_creative", + "model_changed", + "model_select", + 
"model_change_error", + "model_no_models", + "button_model_change", "button_stop_generating", "button_continue", "button_regenerate", diff --git a/module_configs/groq.json b/module_configs/groq.json new file mode 100644 index 00000000..b044890f --- /dev/null +++ b/module_configs/groq.json @@ -0,0 +1,23 @@ +{ + "__comment01__": "Api key from ", + "api_key": "", + + "__comment02__": "Groq API base URL", + "base_url": "https://api.groq.com", + + "__comment03__": "Default model to use (users can change this by using /model command)", + "model_default": "llama3-8b-8192", + + "__comment04__": "Available (and enabled) models. See for more info", + "models": ["llama3-8b-8192", "llama3-70b-8192", "llama2-70b-4096", "mixtral-8x7b-32768", "gemma-7b-it"], + + "__comment05__": "If needed, specify proxy in http://ip:port format (specify http even if it's https proxy)", + "__comment06__": "Use http://username:password@ip:port format in case of proxy with authorization", + "proxy": "", + + "__comment07__": "Response timeout (in seconds)", + "timeout_seconds": 120, + + "__comment08__": "How often each user can send requests to this module (specify 0 to remove the restriction)", + "user_cooldown_seconds": 0 +} diff --git a/module_wrapper_global.py b/module_wrapper_global.py index 34de7e1c..c4339973 100644 --- a/module_wrapper_global.py +++ b/module_wrapper_global.py @@ -29,9 +29,6 @@ from lmao.module_wrapper import MODULES as LMAO_MODULES import psutil -from google_ai_module import GoogleAIModule -from ms_copilot_module import MSCopilotModule -from ms_copilot_designer_module import MSCopilotDesignerModule import messages import users_handler @@ -39,15 +36,32 @@ from async_helper import async_helper from bot_sender import send_message_async from lmao_process_loop import LMAO_LOOP_DELAY, lmao_process_loop +from lmao_process_loop_web import lmao_process_loop_web +from google_ai_module import GoogleAIModule +from ms_copilot_module import MSCopilotModule +from ms_copilot_designer_module import MSCopilotDesignerModule +from groq_module import GroqModule # List of available modules (their names) # LlM-Api-Open (LMAO) modules should start with lmao_ # See for more info -MODULES = ["lmao_chatgpt", "lmao_ms_copilot", "chatgpt", "dalle", "ms_copilot", "ms_copilot_designer", "gemini"] +MODULES = [ + "lmao_chatgpt", + "lmao_ms_copilot", + "chatgpt", + "dalle", + "ms_copilot", + "ms_copilot_designer", + "gemini", + "groq", +] # Names of modules with conversation history (clearable) -MODULES_WITH_HISTORY = ["lmao_chatgpt", "lmao_ms_copilot", "chatgpt", "ms_copilot", "gemini"] +MODULES_WITH_HISTORY = ["lmao_chatgpt", "lmao_ms_copilot", "chatgpt", "ms_copilot", "gemini", "groq"] + +# Names of modules with ability to select model +MODULES_WITH_MODELS = ["groq"] # Maximum time (in seconds) to wait for LMAO module to close before killing it's process _LMAO_STOP_TIMEOUT = 10 @@ -64,6 +78,9 @@ def __init__( messages_: messages.Messages, users_handler_: users_handler.UsersHandler, logging_queue: multiprocessing.Queue, + use_web: bool, + web_cooldown_timer: multiprocessing.Value, + web_request_lock: multiprocessing.Lock, ) -> None: """Module's class initialization here (and LMAO process initialization) This is called from main process. 
Some other functions (see below) will be called from another processes @@ -74,6 +91,9 @@ def __init__( messages_ (messages.Messages): initialized messages wrapper users_handler_ (users_handler.UsersHandler): initialized users handler logging_queue (multiprocessing.Queue): initialized logging queue to handle logs from separate processes + use_web (bool): True to use web API for LMAO modules instead of python package + web_cooldown_timer (multiprocessing.Value): double value that stores last request time to LMAO in seconds + web_request_lock (multiprocessing.Lock): lock to prevent multiple process from sending multiple requests Raises: Exception: no module or module class __init__ error @@ -123,7 +143,7 @@ def __init__( # Start LMAO process (LMAO modules needs to be loaded constantly so we need all that stuff at least for now) logging.info("Starting _lmao_process_loop as process") self._lmao_process = multiprocessing.Process( - target=lmao_process_loop, + target=lmao_process_loop_web if use_web else lmao_process_loop, args=( self.name, self.name_lmao, @@ -139,6 +159,8 @@ def __init__( self._lmao_request_queue, self._lmao_response_queue, self._lmao_exceptions_queue, + web_cooldown_timer, + web_request_lock, ), ) with self._lmao_process_running.get_lock(): @@ -185,6 +207,12 @@ def __init__( elif name == "ms_copilot_designer": self.module = MSCopilotDesignerModule(config, self.messages, self.users_handler) + ######## + # Groq # + ######## + elif name == "groq": + self.module = GroqModule(config, self.messages, self.users_handler) + def is_busy(self) -> bool: """ Returns: @@ -261,8 +289,8 @@ def process_request(self, request_response: request_response_container.RequestRe self._lmao_request_queue.put(request_response) # Wait until it's processed or failed - logging.info(f"Waiting for {self.name} request to be processed") - time.sleep(1) + logging.info(f"Waiting for {self.name} request to be processed (waiting for container)") + response_ = None while True: # Check process with self._lmao_process_running.get_lock(): @@ -279,21 +307,18 @@ def process_request(self, request_response: request_response_container.RequestRe if lmao_exception is not None: raise lmao_exception - # Check status - with self._lmao_module_status.get_lock(): - module_status = self._lmao_module_status.value - if module_status == STATUS_IDLE: + # Try to get container back + try: + response_ = self._lmao_response_queue.get(block=False) + except queue.Empty: + pass + if response_: + logging.info(f"Received container back from {self.name} process") break time.sleep(LMAO_LOOP_DELAY) # Update container - # TODO: Optimize this - response_ = None - try: - response_ = self._lmao_response_queue.get(block=True, timeout=1) - except queue.Empty: - logging.warning(f"Cannot get container back from {self.name} process") if response_: request_response.response_text = response_.response_text for response_image in response_.response_images: @@ -307,6 +332,8 @@ def process_request(self, request_response: request_response_container.RequestRe request_response.error = response_.error request_response.response_next_chunk_start_index = response_.response_next_chunk_start_index request_response.response_sent_len = response_.response_sent_len + else: + logging.warning(f"Unable to get container back from {self.name} process") ########## # Gemini # @@ -330,6 +357,13 @@ def process_request(self, request_response: request_response_container.RequestRe self.module.initialize() self.module.process_request(request_response) + ######## + # Groq # + ######## + elif 
self.name == "groq": + self.module.initialize() + self.module.process_request(request_response) + # Done logging.info(f"{self.name} request processing finished") @@ -472,6 +506,10 @@ def delete_conversation(self, user_id: int) -> None: elif self.name == "ms_copilot": self.module.clear_conversation_for_user(user_id) + # Groq + elif self.name == "groq": + self.module.clear_conversation_for_user(user_id) + def on_exit(self) -> None: """Calls module's post-stop actions (and closes LMAO module) This is called from main process diff --git a/request_processor.py b/request_processor.py index 96a7fef4..6891720e 100644 --- a/request_processor.py +++ b/request_processor.py @@ -107,7 +107,8 @@ def request_processor( except Exception as e: request_.error = True lang_id = users_handler_.get_key(user_id, "lang_id", "eng") - request_.response_text = messages_.get_message("response_error", lang_id=lang_id).format(error_text=str(e)) + error_text = str(e)[:1000] + request_.response_text = messages_.get_message("response_error", lang_id=lang_id).format(error_text=error_text) async_helper(send_message_async(config.get("telegram"), messages_, request_, end=True)) logging.error("Error processing request", exc_info=e) diff --git a/request_response_container.py b/request_response_container.py index 3e0024b3..f9600b40 100644 --- a/request_response_container.py +++ b/request_response_container.py @@ -33,7 +33,7 @@ PROCESSING_STATE_ABORT = 7 # State to string -PROCESSING_STATE_NAMES = ["Waiting", "Starting", "Active", "Done", "Timed out", "Canceling", "Canceling"] +PROCESSING_STATE_NAMES = ["Waiting", "Starting", "Active", "Done", "Timed out", "Canceling", "Canceling", "Abort"] class RequestResponseContainer: diff --git a/requirements.txt b/requirements.txt index c7413791..7d0bb15d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,8 @@ git+https://github.com/F33RNI/EdgeGPT@main#egg=EdgeGPT git+https://github.com/F33RNI/md2tgmd.git@main -llm-api-open~=2.1 +llm-api-open~=2.2 revChatGPT==6.8.6 -python-telegram-bot==20.3 +python-telegram-bot>=20.3 openai>=0.26.4 tiktoken>=0.2.0 OpenAIAuth>=0.3.2 @@ -12,3 +12,6 @@ BingImageCreator>=0.5.0 langdetect>=1.0.9 google-generativeai >= 0.3.1 packaging>=23.2 +requests>=2,<3 +groq>=0,<1 +httpx>=0,<1