fix(bing): use custom backend
lss233 committed Sep 10, 2023
1 parent b727693 commit 1b5df48
Showing 10 changed files with 110 additions and 224 deletions.
45 changes: 0 additions & 45 deletions config.py
@@ -185,14 +185,6 @@ class OpenAIGPT3Params(BaseModel):
default=1000
)


class BingCookiePath(BaseModel):
cookie_content: str
"""Bing 的 Cookie 文件内容"""
proxy: Optional[str] = None
"""可选的代理地址,留空则检测系统代理"""


class TTSAccounts(BaseModel):
speech_key: str
"""TTS KEY"""
@@ -201,28 +193,6 @@ class TTSAccounts(BaseModel):
proxy: Optional[str] = None
"""可选的代理地址,留空则检测系统代理"""


class BingAuths(BaseModel):
show_suggestions: bool = True
"""在 Bing 的回复后加上猜你想问"""
show_references: bool = True
"""在 Bing 的回复前加上引用资料"""
show_remaining_count: bool = True
"""在 Bing 的回复后加上剩余次数"""

use_drawing: bool = False
"""使用 Bing 画图"""

wss_link: str = "wss://sydney.bing.com/sydney/ChatHub"
"""Bing 的 Websocket 接入点"""
bing_endpoint: str = "https://edgeservices.bing.com/edgesvc/turing/conversation/create"
"""Bing 的会话创建接入点"""
accounts: List[BingCookiePath] = []
"""Bing 的账号列表"""
max_messages: int = 30
"""Bing 的最大消息数,仅展示用"""


class G4fModels(BaseModel):
provider: str
"""ai提供方"""
@@ -791,7 +761,6 @@ class Config(BaseModel):
# === Account Settings ===
accounts: AccountsModel = AccountsModel()

bing: BingAuths = BingAuths()
azure: Optional[AzureConfig]
gpt4free: G4fAuths = G4fAuths()

@@ -846,20 +815,6 @@ def load_preset(self, keyword):
logger.exception(e)
logger.error("配置文件有误,请重新修改!")

@staticmethod
def __load_json_config() -> Config:
try:
import json
with open("config.json", "rb") as f:
if guessed_str := from_bytes(f.read()).best():
return Config.parse_obj(json.loads(str(guessed_str)))
else:
raise ValueError("无法识别 JSON 格式!")
except Exception as e:
logger.exception(e)
logger.error("配置文件有误,请重新修改!")
exit(-1)

@staticmethod
def load_config() -> Config:
# PaaS 部署兼容
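With the BingCookiePath and BingAuths models removed from config.py, Bing credentials are handled by the shared account manager instead of a dedicated config section, and the wss_link / bing_endpoint settings disappear because requests now go through the custom backend (see framework/llm/microsoft/bing.py further down). A minimal sketch of the new flow, based only on the register()/pick() calls visible in this commit:

# Sketch: how BingAdapter now obtains credentials (names taken from this diff).
from framework.accounts import account_manager
from framework.llm.microsoft.models import BingCookieAuth

account_manager.register_type("bing", BingCookieAuth)  # done in BingAdapter.register()
account = account_manager.pick("bing")                  # returns a configured BingCookieAuth
headers = account.build_headers()                       # Cookie + User-Agent headers for the proxy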
11 changes: 5 additions & 6 deletions constants.py
@@ -1,8 +1,6 @@
from enum import Enum
from typing import Optional

from EdgeGPT.EdgeGPT import ConversationStyle

from config import Config
from framework.drawing import DrawingAIFactory, SDWebUI
from framework.llm import BardAdapter
@@ -15,16 +13,17 @@
from framework.llm import PoeAdapter
from framework.llm import YiyanAdapter
from framework.llm.iflytek.web import XinghuoWebAdapter
from framework.llm.microsoft.bing import ConversationStyle
from framework.llm.quora.poe_web import BotType as PoeBotType

LlmFactory.register("chatgpt-web", ChatGPTWebAdapter, ())
LlmFactory.register("chatgpt-api", ChatGPTAPIAdapter, ())
LlmFactory.register("yiyan", YiyanAdapter, ())
LlmFactory.register("slack-claude", ClaudeInSlackAdapter, ())
LlmFactory.register("bard", BardAdapter, ())
LlmFactory.register("bing-c", BingAdapter, (ConversationStyle.creative,))
LlmFactory.register("bing-p", BingAdapter, (ConversationStyle.precise,))
LlmFactory.register("bing-b", BingAdapter, (ConversationStyle.balanced,))
LlmFactory.register("bing-c", BingAdapter, (ConversationStyle.Creative,))
LlmFactory.register("bing-p", BingAdapter, (ConversationStyle.Precise,))
LlmFactory.register("bing-b", BingAdapter, (ConversationStyle.Balanced,))
LlmFactory.register("chatglm-api", ChatGLM6BAdapter, ())
LlmFactory.register("poe-capybara", PoeAdapter, (PoeBotType.Sage,))
LlmFactory.register("poe-beaver", PoeAdapter, (PoeBotType.GPT4,))
@@ -48,7 +47,7 @@
if config.sdwebui:
DrawingAIFactory.register("sd", SDWebUI, (config.sdwebui,))
DrawingAIFactory.register(
"bing", BingAdapter, ("drawing", ConversationStyle.creative))
"bing", BingAdapter, ("drawing", ConversationStyle.Creative))
DrawingAIFactory.register("openai", ChatGPTAPIAdapter, ("drawing",))


197 changes: 61 additions & 136 deletions framework/llm/microsoft/bing.py
@@ -1,165 +1,90 @@
import asyncio
import json
import re
from contextlib import suppress
from typing import Generator, List
import enum
from typing import Generator, List, Dict

import aiohttp
from EdgeGPT.EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess
from EdgeGPT.ImageGen import ImageGenAsync
from graia.ariadne.message.element import Image as GraiaImage
import openai
from loguru import logger

import constants
from framework.accounts import account_manager
from framework.drawing import DrawAI
from framework.exceptions import LlmOperationNotSupportedException, LlmRequestTimeoutException, \
LLmAuthenticationFailedException, DrawingFailedException
from framework.llm.llm import Llm
from framework.llm.microsoft.models import BingCookieAuth
from framework.utils.tokenutils import get_token_count

image_pattern = r"!\[.*\]\((.*)\)"


class BingAdapter(Llm, DrawAI):
cookieData = None
count: int = 0
class ConversationStyle(enum.Enum):
Creative = 'creative'
Balanced = 'balanced'
Precise = 'precise'


class BingAdapter(Llm, DrawAI):
conversation_style: ConversationStyle = None

bot: EdgeChatbot
"""底层实现"""
account: BingCookieAuth
messages: List[Dict[str, str]]

def __init__(self, session_id: str = "unknown", conversation_style: ConversationStyle = ConversationStyle.creative):
def __init__(self, session_id: str = "unknown", conversation_style: ConversationStyle = ConversationStyle.Creative):
super().__init__(session_id)
self.account = account_manager.pick('bing')
self.session_id = session_id
self.conversation_style = conversation_style
account = account_manager.pick('bing')
self.cookieData = json.loads(account.cookie_content)
try:
self.bot = EdgeChatbot(cookies=self.cookieData, proxy=constants.proxy)
except NotAllowedToAccess as e:
raise LLmAuthenticationFailedException("bing") from e
self.__conversation_keep_from = 0
self.messages = []
self.max_tokens = 7000

async def rollback(self):
raise LlmOperationNotSupportedException()
self.messages = self.messages[:-2 or None]

async def on_destoryed(self):
...

async def ask(self, prompt: str) -> Generator[str, None, None]:
self.count = self.count + 1
parsed_content = ''
image_urls = []
try:
async for final, response in self.bot.ask_stream(prompt=prompt,
conversation_style=self.conversation_style,
wss_link=constants.config.bing.wss_link,
locale="zh-cn"):
if not response:
continue

if final:
# 最后一条消息
max_messages = constants.config.bing.max_messages
with suppress(KeyError):
max_messages = response["item"]["throttling"]["maxNumUserMessagesInConversation"]

with suppress(KeyError):
raw_text = response["item"]["messages"][1]["adaptiveCards"][0]["body"][0]["text"]
image_urls = re.findall(image_pattern, raw_text)

remaining_conversations = f'\n剩余回复数:{self.count} / {max_messages} ' \
if constants.config.bing.show_remaining_count else ''

if len(response["item"].get('messages', [])) > 1 and constants.config.bing.show_suggestions:
suggestions = response["item"]["messages"][-1].get("suggestedResponses", [])
if len(suggestions) > 0:
parsed_content = parsed_content + '\n猜你想问: \n'
for suggestion in suggestions:
parsed_content = f"{parsed_content}* {suggestion.get('text')} \n"

parsed_content = parsed_content + remaining_conversations

if parsed_content == remaining_conversations: # No content
yield "Bing 已结束本次会话。继续发送消息将重新开启一个新会话。"
self.count = 0
await self.bot.reset()
return
else:
# 生成中的消息
parsed_content = re.sub(r"Searching the web for:(.*)\n", "", response)
parsed_content = re.sub(r"```json(.*)```", "", parsed_content, flags=re.DOTALL)
parsed_content = re.sub(r"Generating answers for you...", "", parsed_content)
if constants.config.bing.show_references:
parsed_content = re.sub(r"\[(\d+)\]: ", r"\1: ", parsed_content)
else:
parsed_content = re.sub(r"(\[\d+]: .+)+", "", parsed_content)
parts = re.split(image_pattern, parsed_content)
# 图片单独保存
parsed_content = parts[0]

if len(parts) > 2:
parsed_content = parsed_content + parts[-1]

yield parsed_content
logger.debug(f"[Bing AI 响应] {parsed_content}")
image_tasks = [
asyncio.create_task(self.__download_image(url))
for url in image_urls
]
for image in await asyncio.gather(*image_tasks):
yield image
except (asyncio.exceptions.TimeoutError, asyncio.exceptions.CancelledError) as e:
raise LlmRequestTimeoutException("bing") from e
except NotAllowedToAccess as e:
raise LLmAuthenticationFailedException("bing") from e
except Exception as e:
if str(e) == 'Redirect failed':
raise DrawingFailedException() from e
raise e

async def text_to_img(self, prompt: str):
logger.debug(f"[Bing Image] Prompt: {prompt}")
try:
async with ImageGenAsync(
all_cookies=self.bot.chat_hub.cookies,
quiet=True
) as image_generator:
images = await image_generator.get_images(prompt)

logger.debug(f"[Bing Image] Response: {images}")
tasks = [asyncio.create_task(self.__download_image(image)) for image in images]
return await asyncio.gather(*tasks)
except Exception as e:
if str(e) == 'Redirect failed':
raise DrawingFailedException() from e
raise e

async def img_to_img(self, init_images: List[GraiaImage], prompt=''):
return await self.text_to_img(prompt)

async def __download_image(self, url) -> GraiaImage:
logger.debug(f"[Bing AI] 下载图片:{url}")

async with aiohttp.ClientSession() as session:
async with session.get(url, proxy=self.bot.proxy) as resp:
resp.raise_for_status()
logger.debug(f"[Bing AI] 下载完成:{resp.content_type} {url}")
return GraiaImage(data_bytes=await resp.read())
async def ask(self, msg: str) -> Generator[str, None, None]:
"""向 AI 发送消息"""
self.messages.append({"role": "user", "content": msg})
full_chunk = []
full_text = ''
while self.max_tokens - get_token_count('gpt-4', self.messages) < 0 and \
len(self.messages) > self.__conversation_keep_from:
self.messages.pop(self.__conversation_keep_from)
logger.debug(
f"清理 token,历史记录遗忘后使用 token 数:{str(get_token_count('gpt-4', self.messages))}"
)
async for chunk in await openai.ChatCompletion.acreate(
model=f'bing-{self.conversation_style.value}',
messages=self.messages,
stream=True,
api_base="https://llm-proxy.lss233.com/bing/v1",
api_key="sk-274a8645fd3clss233achatgptfor0botfe",
headers=self.account.build_headers()
):
logger.info(chunk.choices[0].delta)
full_chunk.append(chunk.choices[0].delta)
full_text = ''.join([m.get('content', '') for m in full_chunk])
yield full_text
logger.debug(f"[Bing-{self.conversation_style.value}] {self.session_id} - {full_text}")
self.messages.append({"role": "assistant", "content": full_text})

# async def __download_image(self, url) -> GraiaImage:
# logger.debug(f"[Bing AI] 下载图片:{url}")
#
# async with aiohttp.ClientSession() as session:
# async with session.get(url, proxy=self.bot.proxy) as resp:
# resp.raise_for_status()
# logger.debug(f"[Bing AI] 下载完成:{resp.content_type} {url}")
# return GraiaImage(data_bytes=await resp.read())

@classmethod
def register(cls):
account_manager.register_type("bing", BingCookieAuth)

async def preset_ask(self, role: str, text: str):
if role.endswith('bot') or role in {'assistant', 'bing'}:
logger.debug(f"[预设] 响应:{text}")
yield text
else:
logger.debug(f"[预设] 发送:{text}")
item = None
async for item in self.ask(text):
pass
if item:
logger.debug(f"[预设] Chatbot 回应:{item}")
async def preset_ask(self, role: str, prompt: str):
if role.endswith('bot') or role in {'assistant', 'chatgpt'}:
logger.debug(f"[预设] 响应:{prompt}")
yield prompt
role = 'assistant'
if role not in ['assistant', 'user', 'system']:
raise ValueError(f"预设文本有误!仅支持设定 assistant、user 或 system 的预设文本,但你写了{role}。")
self.messages.append({"role": role, "content": prompt})
self.__conversation_keep_from = len(self.messages)
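Stripped of the adapter plumbing, the new ask() is a standard streamed ChatCompletion call against an OpenAI-compatible proxy: the model name encodes the conversation style, the Bing cookies travel as extra request headers, and history is trimmed from the front whenever the gpt-4 token count exceeds max_tokens. A minimal, self-contained sketch of that call using the openai 0.x async API (the endpoint, key and headers below are placeholders, not the values from this commit):

import asyncio
import openai

async def stream_bing(messages, style="creative", headers=None):
    # Stream partial completions from an OpenAI-compatible proxy.
    parts = []
    response = await openai.ChatCompletion.acreate(
        model=f"bing-{style}",                          # style baked into the model name
        messages=messages,
        stream=True,
        api_base="https://proxy.example.com/bing/v1",   # placeholder endpoint
        api_key="sk-placeholder",                       # placeholder key
        headers=headers or {},                          # cookie headers from BingCookieAuth
    )
    async for chunk in response:
        parts.append(chunk.choices[0].delta.get("content", ""))
        yield "".join(parts)

async def main():
    async for text in stream_bing([{"role": "user", "content": "hello"}]):
        print(text)

asyncio.run(main())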
15 changes: 9 additions & 6 deletions framework/llm/microsoft/models.py
@@ -25,13 +25,16 @@ class Config:
async def check_alive(self) -> bool:
async with httpx.AsyncClient(
trust_env=True,
headers={
"Cookie": ';'.join(f"{cookie['name']}=cookie['value'])" for cookie in json.loads(self.cookie_content)),
"sec-ch-ua": r'"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.64'
}
headers=self.build_headers()
) as client:
response = await client.get(f"{constants.config.bing.bing_endpoint}")
response = await client.get("https://edgeservices.bing.com/edgesvc/turing/conversation/create")
if response.json()["result"]["value"] == "UnauthorizedRequest":
raise LLmAuthenticationFailedException(response.json()["result"]["message"])
return "Success" in response.text

def build_headers(self):
return {
"Cookie": ';'.join(f"{cookie['name']}={cookie['value']})" for cookie in json.loads(self.cookie_content)),
"sec-ch-ua": r'"Chromium";v="112", "Microsoft Edge";v="112", "Not:A-Brand";v="99"',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.64'
}
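build_headers() turns the stored cookie JSON (a list of objects with name and value keys) into a single Cookie header plus a fixed desktop-Edge User-Agent, and is now shared between the liveness check and the chat requests. A small sketch of the same idea with made-up cookie data:

import json

cookie_content = json.dumps([
    {"name": "_U", "value": "example-token"},
    {"name": "MUID", "value": "example-muid"},
])

headers = {
    # Join every cookie as name=value, separated by semicolons.
    "Cookie": ";".join(
        f"{c['name']}={c['value']}" for c in json.loads(cookie_content)
    ),
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) ... Edg/112.0.1722.64",
}
print(headers["Cookie"])  # _U=example-token;MUID=example-muid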
2 changes: 1 addition & 1 deletion framework/llm/quora/models.py
@@ -20,7 +20,7 @@ class Config:
async def check_alive(self) -> bool:
async with httpx.AsyncClient() as client:
response = await client.post(
'https://chatgpt-proxy.lss233.com/poe/v1/models',
'https://llm-proxy.lss233.com/poe/v1/models',
headers={'Authorization': f'Bearer {self.p_b}'}
)
