From 68c7a92ee269131e4aada81fdbc994466ab38b57 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 16 Dec 2024 00:58:45 +0100 Subject: [PATCH] Hide api_key fields from disabled providers --- g4f/Provider/Blackbox2.py | 21 ++++++++++----------- g4f/Provider/needs_auth/Cerebras.py | 18 ++++++++++-------- g4f/Provider/needs_auth/OpenaiChat.py | 2 +- g4f/gui/client/index.html | 26 +++++++++++++------------- g4f/gui/client/static/css/style.css | 3 ++- g4f/gui/client/static/js/chat.v1.js | 2 ++ 6 files changed, 38 insertions(+), 34 deletions(-) diff --git a/g4f/Provider/Blackbox2.py b/g4f/Provider/Blackbox2.py index ce949b8b30a..f27a25595f8 100644 --- a/g4f/Provider/Blackbox2.py +++ b/g4f/Provider/Blackbox2.py @@ -6,7 +6,7 @@ import json from pathlib import Path from aiohttp import ClientSession -from typing import AsyncGenerator +from typing import AsyncIterator from ..typing import AsyncResult, Messages from ..image import ImageResponse @@ -21,12 +21,12 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin): "llama-3.1-70b": "https://www.blackbox.ai/api/improve-prompt", "flux": "https://www.blackbox.ai/api/image-generator" } - + working = True supports_system_message = True supports_message_history = True supports_stream = False - + default_model = 'llama-3.1-70b' chat_models = ['llama-3.1-70b'] image_models = ['flux'] @@ -97,15 +97,14 @@ async def create_async_generator( messages: Messages, prompt: str = None, proxy: str = None, - prompt: str = None, max_retries: int = 3, delay: int = 1, max_tokens: int = None, **kwargs - ) -> AsyncGenerator[str, None]: + ) -> AsyncResult: if not model: model = cls.default_model - + if model in cls.chat_models: async for result in cls._generate_text(model, messages, proxy, max_retries, delay, max_tokens): yield result @@ -125,13 +124,13 @@ async def _generate_text( max_retries: int = 3, delay: int = 1, max_tokens: int = None, - ) -> AsyncGenerator[str, None]: + ) -> AsyncIterator[str]: headers = cls._get_headers() async 
with ClientSession(headers=headers) as session: license_key = await cls._get_license_key(session) api_endpoint = cls.api_endpoints[model] - + data = { "messages": messages, "max_tokens": max_tokens, @@ -162,7 +161,7 @@ async def _generate_image( model: str, prompt: str, proxy: str = None - ) -> AsyncGenerator[ImageResponse, None]: + ) -> AsyncIterator[ImageResponse]: headers = cls._get_headers() api_endpoint = cls.api_endpoints[model] @@ -170,11 +169,11 @@ async def _generate_image( data = { "query": prompt } - + async with session.post(api_endpoint, headers=headers, json=data, proxy=proxy) as response: response.raise_for_status() response_data = await response.json() - + if 'markdown' in response_data: image_url = response_data['markdown'].split('(')[1].split(')')[0] yield ImageResponse(images=image_url, alt=prompt) diff --git a/g4f/Provider/needs_auth/Cerebras.py b/g4f/Provider/needs_auth/Cerebras.py index df34db0eec3..86b2dcbda99 100644 --- a/g4f/Provider/needs_auth/Cerebras.py +++ b/g4f/Provider/needs_auth/Cerebras.py @@ -16,6 +16,7 @@ class Cerebras(OpenaiAPI): models = [ "llama3.1-70b", "llama3.1-8b", + "llama-3.3-70b" ] model_aliases = {"llama-3.1-70b": "llama3.1-70b", "llama-3.1-8b": "llama3.1-8b"} @@ -29,14 +30,15 @@ async def create_async_generator( cookies: Cookies = None, **kwargs ) -> AsyncResult: - if api_key is None and cookies is None: - cookies = get_cookies(".cerebras.ai") - async with ClientSession(cookies=cookies) as session: - async with session.get("https://inference.cerebras.ai/api/auth/session") as response: - raise_for_status(response) - data = await response.json() - if data: - api_key = data.get("user", {}).get("demoApiKey") + if api_key is None: + if cookies is None: + cookies = get_cookies(".cerebras.ai") + async with ClientSession(cookies=cookies) as session: + async with session.get("https://inference.cerebras.ai/api/auth/session") as response: + await raise_for_status(response) + data = await response.json() + if data: + api_key = 
data.get("user", {}).get("demoApiKey") async for chunk in super().create_async_generator( model, messages, api_base=api_base, diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index 019761985de..0e25a28d8fa 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -504,7 +504,7 @@ async def synthesize(cls, params: dict) -> AsyncIterator[bytes]: await cls.login() async with StreamSession( impersonate="chrome", - timeout=900 + timeout=0 ) as session: async with session.get( f"{cls.url}/backend-api/synthesize", diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index c53284ac615..5ddc1104f2f 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -150,47 +150,47 @@

Settings

-
+ -
+ -
+ -
+ -
+ -
+ -
- - + -
+ -
+ -
+ -
+ diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css index 10f20607ce9..a499779a644 100644 --- a/g4f/gui/client/static/css/style.css +++ b/g4f/gui/client/static/css/style.css @@ -1022,7 +1022,8 @@ ul { } .settings h3 { - padding-left: 50px; + padding-left: 54px; + padding-top: 18px; } .buttons { diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index f8bd894dc8e..222886e9696 100644 --- a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -1293,6 +1293,7 @@ const load_provider_option = (input, provider_name) => { providerSelect.querySelectorAll(`option[data-parent="${provider_name}"]`).forEach( (el) => el.removeAttribute("disabled") ); + settings.querySelector(`.field:has(#${provider_name}-api_key)`)?.classList.remove("hidden"); } else { modelSelect.querySelectorAll(`option[data-providers*="${provider_name}"]`).forEach( (el) => { @@ -1307,6 +1308,7 @@ const load_provider_option = (input, provider_name) => { providerSelect.querySelectorAll(`option[data-parent="${provider_name}"]`).forEach( (el) => el.setAttribute("disabled", "disabled") ); + settings.querySelector(`.field:has(#${provider_name}-api_key)`)?.classList.add("hidden"); } };