Skip to content

Commit

Permalink
- patch : Fail to generate response. Resolves #15. Resolves #14
Browse files Browse the repository at this point in the history
- patch : Drop support for websocket.
  • Loading branch information
Simatwa committed Apr 15, 2024
1 parent 7451be9 commit 7641f33
Show file tree
Hide file tree
Showing 7 changed files with 67 additions and 131 deletions.
4 changes: 0 additions & 4 deletions WebChatGPT/errors.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,3 @@
class WebSocketError(Exception):
    """Raised when the websocket transport to ChatGPT fails."""


class CookieExpiredError(Exception):
    """Raised when the stored authentication cookies are no longer valid."""

Expand Down
171 changes: 54 additions & 117 deletions WebChatGPT/main.py
Original file line number Diff line number Diff line change
@@ -1,69 +1,10 @@
#!/usr/bin/python
import requests
from WebChatGPT import utils
import logging
import json
import re
from functools import lru_cache
import websocket
from base64 import b64decode
from WebChatGPT.errors import WebSocketError
from threading import Thread as thr
from typing import Iterator
from .errors import MaximumRetrialError


class Websocket:
    # Thin wrapper around websocket-client's WebSocketApp that receives
    # streamed ChatGPT response chunks and mirrors them onto the parent
    # `chatgpt` object, which polls `loading_chunk` / `socket_closed`.

    def __init__(
        self,
        data: dict,
        chatgpt: object,
        trace: bool = False,
    ):
        """Prepare the websocket session.

        Args:
            data: Registration payload. Must contain a ``wss_url`` key with
                the websocket endpoint; the remaining keys are sent as the
                opening payload.
            chatgpt: Parent object that receives streamed state
                (``loading_chunk``, ``last_response_chunk``,
                ``last_response_undecoded_chunk``, ``socket_closed``).
            trace: Enable websocket-client's verbose tracing output.
        """
        # Reset the parent's streaming state before connecting.
        chatgpt.socket_closed = False
        chatgpt.loading_chunk = ""
        self.payload = data.copy()
        self.url = data.get("wss_url")
        # The URL is transport information, not part of the opening payload.
        self.payload.pop("wss_url")
        self.chatgpt = chatgpt
        self.last_response_chunk: dict = {}
        self.last_response_undecoded_chunk: dict = {}
        websocket.enableTrace(trace)

    def on_message(self, ws, message):
        # Each frame is JSON whose "body" field is base64-encoded text.
        response = json.loads(message)
        self.chatgpt.last_response_undecoded_chunk = response
        decoded_body = b64decode(response["body"]).decode("utf-8")
        response["body"] = decoded_body
        self.chatgpt.last_response_chunk = response
        # Publishing the decoded body is what signals the polling parent
        # that a new chunk has arrived.
        self.chatgpt.loading_chunk = decoded_body

    def on_error(self, ws, error):
        # Mark the socket closed first so the parent's consumer loop can
        # terminate, then surface the failure.
        self.on_close("ws")
        raise WebSocketError(error)

    def on_close(self, ws, *args, **kwargs):
        self.chatgpt.socket_closed = True

    def on_open(
        self,
        ws,
    ):
        # Send the registration payload as soon as the connection opens.
        json_data = json.dumps(self.payload, indent=4)
        ws.send(json_data)

    def run(
        self,
    ):
        """Open the websocket and block until the connection closes."""
        ws = websocket.WebSocketApp(
            self.url,
            on_message=self.on_message,
            on_error=self.on_error,
            on_close=self.on_close,
            on_open=self.on_open,
        )
        # NOTE(review): presumably the endpoint rejects connections without
        # this Origin header — confirm against the server's requirements.
        ws.run_forever(origin="https://chat.openai.com")


class ChatGPT:
Expand Down Expand Up @@ -127,6 +68,9 @@ def __init__(
self.stop_sharing_conversation_endpoint = (
"https://chat.openai.com/backend-api/%(share_id)s"
)
self.sentinel_chat_requirements_endpoint: str = (
"https://chat.openai.com/backend-api/sentinel/chat-requirements"
)
self.session.headers["User-Agent"] = user_agent
self.locale = locale
self.model = model
Expand All @@ -139,12 +83,7 @@ def __init__(
self.__already_init = False
self.__index = conversation_index
self.__title_cache = {}
self.last_response_undecoded_chunk: str = ""
self.last_response_chunk: dict = {}
self.loading_chunk: str = ""
self.socket_closed: bool = True
self.trace = trace
self.request_more_times: int = 2
self.stream_chunk_size = 64
# self.register_ws =self.session.post("https://chat.openai.com/backend-api/register-websocket")
# Websocket(self.register_ws.json(),self).run()

Expand All @@ -171,6 +110,13 @@ def current_conversation_id(self):
def get_current_message_id(self):
return self.last_response_metadata.get(2).get("message_id")

def update_sentinel_tokens(self):
    """Fetch a fresh sentinel token and attach it to the session headers.

    Posts an empty JSON body to the sentinel chat-requirements endpoint
    and stores the returned token as the
    ``OpenAI-Sentinel-Chat-Requirements-Token`` header so that subsequent
    conversation requests on this session carry it.

    Raises:
        requests.HTTPError: If the endpoint responds with an error status.
        KeyError: If the response body lacks a ``token`` field.
    """
    response = self.session.post(
        self.sentinel_chat_requirements_endpoint, json={}
    )
    response.raise_for_status()
    token = response.json()["token"]
    self.session.headers["OpenAI-Sentinel-Chat-Requirements-Token"] = token

def ask(
self,
prompt: str,
Expand Down Expand Up @@ -228,32 +174,28 @@ def ask(
}
```
"""
self.update_sentinel_tokens()
response = self.session.post(
url=self.conversation_endpoint,
json=self.__generate_payload(prompt),
timeout=self.timeout,
stream=False,
stream=True,
)
response.raise_for_status()
ws_payload = dict(response.json())
self.__request_more_count: int = 0

# out = lambda v:print(json.dumps(dict(v), indent=4))
# out(response.headers)
def for_stream():

ws = Websocket(ws_payload, self, self.trace)
t1 = thr(target=ws.run)
t1.start()
cached_loading_chunk = self.loading_chunk
cached_last_response = self.last_response.copy()
while True:
if self.loading_chunk != cached_loading_chunk:
# New chunk loaded
# response.raise_for_status()
if (
response.ok
and response.headers.get("content-type")
== "text/event-stream; charset=utf-8"
):

def for_stream():
for value in response.iter_lines(
decode_unicode=True,
delimiter="data:",
chunk_size=self.stream_chunk_size,
):
try:
value = self.loading_chunk
# print(value)
to_dict = json.loads(value[5:])
to_dict = json.loads(value)
if "is_completion" in to_dict.keys():
# Metadata (response)
self.last_response_metadata[
Expand All @@ -269,40 +211,35 @@ def for_stream():
yield value
pass

finally:
cached_loading_chunk = self.loading_chunk

if self.socket_closed:
t1.join()
break

if (
self.last_response == cached_last_response
or self.last_response["message"]["status"] != "finished_successfully"
):

# print(json.dumps(self.last_response, indent=4))
# print("Requesting more body")
# print('=='*40)
t1.join()
if self.__request_more_count >= self.request_more_times:
raise MaximumRetrialError(
f"Failed to generate response after {self.request_more_times} attempts"
)

for value in for_stream():
yield value

self.__request_more_count += 1
# else:
# print(print(json.dumps(self.last_response_chunk, indent=4)))
def for_non_stream():
response_to_be_returned = {}
for value in response.iter_lines(
decode_unicode=True,
delimiter="data:",
chunk_size=self.stream_chunk_size,
):
try:
to_dict = json.loads(value)
if "is_completion" in to_dict.keys():
# Metadata (response)
self.last_response_metadata[
2 if to_dict.get("is_completion") else 1
] = to_dict
continue
# Only data containing the `feedback body` make it to here
self.last_response.update(to_dict)
response_to_be_returned.update(to_dict)
except json.decoder.JSONDecodeError:
# Caused by either empty string or [DONE]
pass
return response_to_be_returned

def for_non_stream():
for _ in for_stream():
pass
return self.last_response
return for_stream() if stream else for_non_stream()

return for_stream() if stream else for_non_stream()
else:
raise Exception(
f"Failed to fetch response - ({response.status_code}, {response.reason} : {response.headers.get('content-type')} : {response.text}"
)

def chat(self, prompt: str, stream: bool = False) -> str:
"""Interact with ChatGPT on the fly
Expand Down
5 changes: 3 additions & 2 deletions WebChatGPT/utils.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from datetime import datetime, timezone
import json
import logging
import os
import locale
from uuid import uuid4
from typing import Any
from .errors import CookieExpiredError
Expand All @@ -27,6 +27,7 @@
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0",
"OAI-Language": locale.getlocale()[0].replace("_", "-"),
}

response_example = {
Expand Down Expand Up @@ -238,7 +239,7 @@ def generate_payload(self: object, prompt: str) -> dict:
"timezone_offset_min": -180,
"suggestions": [],
"history_and_training_disabled": self.disable_history_and_training,
"arkose_token": None,
# "arkose_token": None,
"conversation_mode": {"kind": "primary_assistant"},
"force_paragen": False,
"force_rate_limit": False,
Expand Down
9 changes: 8 additions & 1 deletion docs/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -159,4 +159,11 @@ More console chat manipulation features.

**What's new?**

- patch: Independencies
- patch: Independencies

## v0.3.0

**What's new?**

- patch : Fix failure to generate response. Resolves #15, #14
- patch : Drop support for websocket.
3 changes: 0 additions & 3 deletions docs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@

Unlike the [official OpenAI library](https://github.com/openai/openai-python), this library makes REST-API calls to [ChatGPT](https://chat.openai.com) via the **browser** endpoints. *No API-KEY required*

> [!CAUTION]
> **Currently** very unreliable!
```python
from WebChatGPT import ChatGPT
bot = ChatGPT(
Expand Down
3 changes: 1 addition & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,4 @@ python-dotenv==1.0.0
click==8.1.3
rich==13.3.4
clipman==3.1.0
pyperclip==1.8.2
websocket-client==1.7.0
pyperclip==1.8.2
3 changes: 1 addition & 2 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@

setup(
name="webchatgpt",
version="0.2.9",
version="0.3.0",
license="GNU v3",
author="Smartwa",
maintainer="Smartwa",
Expand Down Expand Up @@ -41,7 +41,6 @@
"rich==13.3.4",
"clipman==3.1.0",
"pyperclip==1.8.2",
"websocket-client==1.7.0",
],
python_requires=">=3.10",
keywords=[
Expand Down

0 comments on commit 7641f33

Please sign in to comment.