From 187080d08c268c989e294e021eb974f2bd9c15d5 Mon Sep 17 00:00:00 2001
From: Simatwa
Date: Thu, 21 Dec 2023 01:28:56 +0300
Subject: [PATCH] Auto-fetch OAuth value instead of accepting as parameter

- Chat History
- Rename conversation
- Share conversation
- Delete conversation
- Retrieve shared conversations
- Stop sharing conversation
- Archive conversation
- Conversation index starts from 1, 0 reserved for new creations

---
 .gitignore             |   4 +-
 WebChatGPT/__init__.py |   2 +-
 WebChatGPT/console.py  |  34 ++---
 WebChatGPT/main.py     | 330 +++++++++++++++++++++++++++++++++++++++--
 WebChatGPT/utils.py    |  32 +++-
 docs/CHANGELOG.md      |  17 ++-
 docs/DEVELOPER.md      |  36 ++++-
 docs/README.md         |  16 +-
 8 files changed, 404 insertions(+), 67 deletions(-)

diff --git a/.gitignore b/.gitignore
index cdb0fff..1fd134e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -161,4 +161,6 @@ cython_debug/
 *.vscode
 *.env
 *test.py
-README.md
\ No newline at end of file
+README.md
+resp.json
+*recons
\ No newline at end of file
diff --git a/WebChatGPT/__init__.py b/WebChatGPT/__init__.py
index 1c49cd2..e96185e 100644
--- a/WebChatGPT/__init__.py
+++ b/WebChatGPT/__init__.py
@@ -2,7 +2,7 @@
 
 __all__ = ["ChatGPT"]
 
-__version__ = "0.0.2"
+__version__ = "0.1.0"
 __author__ = "Smartwa"
 __repo__ = "https://github.com/Simatwa/WebChatGPT"
 __info__ = "Reverse Engineered ChatGPT Web-version."
diff --git a/WebChatGPT/console.py b/WebChatGPT/console.py
index d1cb727..3cf7d24 100644
--- a/WebChatGPT/console.py
+++ b/WebChatGPT/console.py
@@ -93,9 +93,11 @@ class InteractiveChatGPT(cmd.Cmd):
         f"┌─[{getpass.getuser().capitalize()}@WebChatGPT]({__version__})\r\n└──╼ ❯❯❯"
     )
 
-    def __init__(self, auth, cookie_path, model, index, timeout, *args, **kwargs):
+    def __init__(self, cookie_path, model, index, timeout, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.bot = ChatGPT(auth, cookie_path, model, index, timeout=timeout)
+        self.bot = ChatGPT(
+            cookie_path, model=model, conversation_index=index, timeout=timeout
+        )
 
     def do_help(self, text):
         """Echoes useful help info
@@ -167,13 +169,6 @@ def chat():
 
 
 @chat.command()
-@click.option(
-    "-A",
-    "--auth",
-    help="OpenAI's authorization value",
-    envvar="openai_authorization",
-    prompt="Enter authorization value for `chat.openai.com`",
-)
 @click.option(
     "-C",
     "--cookie-path",
@@ -190,7 +185,7 @@ def chat():
     default="text-davinci-002-render-sha",
 )
 @click.option(
-    "-I", "--index", help="Conversation index to resume from", type=click.INT, default=0
+    "-I", "--index", help="Conversation index to resume from", type=click.INT, default=1
 )
 @click.option(
     "-T",
@@ -213,13 +208,11 @@ def chat():
     envvar="busy_bar_index",
 )
 @click.option("--prettify/--raw", default=True, help="Prettify the markdowned response")
-def interactive(
-    auth, cookie_path, model, index, timeout, prompt, busy_bar_index, prettify
-):
+def interactive(cookie_path, model, index, timeout, prompt, busy_bar_index, prettify):
     """Chat with ChatGPT interactively"""
     assert isinstance(busy_bar_index, int), "Index must be an integer only"
     busy_bar.spin_index = busy_bar_index
-    bot = InteractiveChatGPT(auth, cookie_path, model, index, timeout)
+    bot = InteractiveChatGPT(cookie_path, model, index, timeout)
     bot.prettify = prettify
     if prompt:
         bot.default(prompt)
@@ -227,13 +220,6 @@
 
 
 @chat.command()
-@click.option(
-    "-A",
-    "--auth",
-    help="OpenAI's authorization value",
-    envvar="openai_authorization",
-    prompt="Enter authorization value for `chat.openai.com`",
-)
 @click.option(
     "-C",
     "--cookie-path",
@@ -250,7 +236,7 @@ def interactive(
     default="text-davinci-002-render-sha",
 )
 @click.option(
-    "-I", "--index", help="Conversation index to resume from", type=click.INT, default=0
+    "-I", "--index", help="Conversation index to resume from", type=click.INT, default=1
 )
 @click.option(
     "-T",
@@ -266,10 +252,10 @@ def interactive(
     prompt="Enter message",
 )
 @click.option("--prettify/--raw", default=True, help="Prettify the markdowned response")
-def generate(auth, cookie_path, model, index, timeout, prompt, prettify):
+def generate(cookie_path, model, index, timeout, prompt, prettify):
     """Generate a quick response with ChatGPT"""
 
-    content = ChatGPT(auth, cookie_path, model, index, timeout=timeout).chat(prompt)
+    content = ChatGPT(cookie_path, model, index, timeout=timeout).chat(prompt)
     if prettify:
         rich.print(Markdown(content))
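Editorial note (not part of the patch): a minimal sketch of what a post-patch call site looks like, assuming `cookies.json` is a placeholder path to a valid `chat.openai.com` cookie export. The point of the diff above is that only the cookie file is needed now; the `-A/--auth` flag and the `Authorization` value disappear from both CLI commands.

```python
# Sketch of the new constructor usage (illustrative only; path is a placeholder).
from WebChatGPT import ChatGPT

bot = ChatGPT(
    "cookies.json",          # only the exported cookie file is required now
    model="text-davinci-002-render-sha",
    conversation_index=1,    # 1 = first listed conversation; 0 = start a new one
)
print(bot.chat("Hello there"))
```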
diff --git a/WebChatGPT/main.py b/WebChatGPT/main.py
index 5663df4..97ae133 100755
--- a/WebChatGPT/main.py
+++ b/WebChatGPT/main.py
@@ -9,21 +9,20 @@ class ChatGPT:
     def __init__(
         self,
-        authorization: str,
         cookie_path: str,
         model: str = "text-davinci-002-render-sha",
-        conversation_index: int = 0,
+        conversation_index: int = 1,
         locale: str = "en-US",
         user_agent: str = "Mozilla/5.0 (X11; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0",
         timeout: tuple = 30,
+        disable_history_and_training: bool = False,
     ):
         """Initializes ChatGPT
 
         Args:
-            authorization (str): OpenAI's authorization value
             cookie_path (str): Path to `.json` file containing `chat.openai.com` cookies
             model (str, optional): ChatGPT text generation model name. Defaults to `text-davinci-002-render-sha`.
-            conversation_index (int, optional): Conversation index to pick up conversation from. Defaults to `0`.
+            conversation_index (int, optional): Conversation index to pick up conversation from. Defaults to `1`.
             locale (str, optional): Your locale. Defaults to `en-US`
             user_agent (str, optional): Http request header User-Agent. Defaults to `Mozilla/5.0 (X11; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0`
             timeout (int, optional): Http request timeout.
@@ -31,8 +30,9 @@ def __init__(
         """
         self.session = requests.Session()
         self.timeout = timeout
-        self.session.headers.update(utils.get_request_headers(authorization))
+        self.auth = {}  # Will be updated while updating headers
         self.session.cookies.update(utils.get_cookies(cookie_path))
+        self.session.headers.update(utils.get_request_headers_and_append_auth(self))
         self.conversation_endpoint = "https://chat.openai.com/backend-api/conversation"
         self.account_detail_endpoint = (
             "https://chat.openai.com/backend-api/accounts/check"
@@ -47,21 +47,57 @@ def __init__(
             "https://chat.openai.com/backend-api/conversations"
         )
         self.title_generation_endpoint = "https://chat.openai.com/backend-api/conversation/gen_title/%(conversation_id)s"
-        self.suggestions = (
-            []
-        )  # [prompt['title'] for prompt in self.prompt_library()['items'] ] # Generated by ChatGPT
-        self.conversation_metadata = self.previous_conversations(
-            index=conversation_index
+        self.conversation_manipulation_endpoint = (
+            "https://chat.openai.com/backend-api/conversation/%(conversation_id)s"
+        )
+        self.share_conversation_endpoint = (
+            "https://chat.openai.com/backend-api/share/create"
+        )
+        self.share_conversation_patch_endpoint = (
+            "https://chat.openai.com/backend-api/share/%(share_id)s"
+        )
+        self.shared_conversations_endpoint = (
+            "https://chat.openai.com/backend-api/shared_conversations"
+        )
+        self.shared_conversation_view_endpoint = (
+            "https://chat.openai.com/share/%(share_id)s"
+        )
+        self.stop_sharing_conversation_endpoint = (
+            "https://chat.openai.com/backend-api/%(share_id)s"
         )
         self.session.headers["User-Agent"] = user_agent
         self.locale = locale
         self.model = model
+        self.disable_history_and_training = disable_history_and_training
         self.last_response = {}
         self.last_response_metadata = {}
+        self.__already_init = False
+        self.__index = conversation_index
 
     def __generate_payload(self, prompt: str) -> dict:
         return utils.generate_payload(self, prompt)
 
+    @property
+    def current_conversation_id(self):
+        if self.__already_init:
+            return self.last_response_metadata.get("conversation_id")
+        else:
+            self.__already_init = True
+            id = (
+                self.previous_conversations(
+                    index=self.__index,
+                    limit=28,
+                )["id"]
+                if self.__index
+                else None
+            )  # When index is 0 create new conversation else resume conversation
+            self.last_response_metadata["conversation_id"] = id
+            return id
+
+    @property
+    def current_message_id(self):
+        return self.last_response_metadata.get("message_id")
+
     def ask(self, prompt: str, stream: bool = False) -> dict:
         """Chat with ChatGPT
 
@@ -358,11 +394,11 @@ def prompt_library(self, limit: int = 4, offset: int = 0) -> list:
 
     def previous_conversations(
         self,
-        limit: int = 10,
+        limit: int = 20,
         offset: int = 0,
         order: str = "updated",
-        index: int = 0,
-        all: str = False,
+        index: int = 1,
+        all: bool = False,
     ) -> list:
         """Loads previous conversations
 
@@ -370,8 +406,8 @@ def previous_conversations(
             limit (int, optional): Fetch this specific amount of chats. Defaults to 28.
             offset (int, optional): ``. Defaults to 0.
             order (str, optional): Sort order. Defaults to "updated".
-            index (int, optional): Return items of this index. Defaults to 0.
-            all (bool, optional): Return the specified limit convos
+            index (int, optional): Index of the item to be returned +1. Defaults to 1.
+            all (bool, optional): Return all conversations based on specified limit.
 
         Returns:
             list: Previous conversations contained in dict
@@ -412,6 +448,7 @@ def previous_conversations(
         ```
         """
         assert isinstance(index, int), "Index must be an integer"
+        index -= 1  # So that 0 equates to False as in self.current_conversation_id
         resp = self.session.get(
            self.previous_conversations_endpoint,
            params={"limit": limit, "offset": offset, "order": order},
@@ -445,3 +482,266 @@ def generate_title(self, conversation_id: str, message_id: str) -> dict:
             timeout=self.timeout,
         )
         return utils.is_json(resp, "title")
+
+    def delete_conversation(self, conversation_id: str) -> dict:
+        """Deletes a particular conversation based on ID
+
+        Args:
+            conversation_id (str): Conversation ID
+
+        Returns:
+            dict: Response
+        """
+        resp = self.session.patch(
+            self.conversation_manipulation_endpoint
+            % {"conversation_id": conversation_id},
+            json={"is_visible": False},
+        )
+        return utils.is_json(resp, "delete")
+
+    def chat_history(self, conversation_id: str, all: bool = False) -> dict:
+        """Fetches previous chat prompts and responses
+
+        Args:
+            conversation_id (str): Conversation ID
+            all (bool): Return the full response as received. Defaults to False.
+
+        Returns:
+            dict: Previous chats
+
+        ```json
+        {
+            "title": "Trump's Age Calculator",
+            "create_time": 1703074882.684634,
+            "update_time": 1703074885.46044,
+            "current_node": "f18a446d-8843-4433-acf7-79cb01a8xxxx",
+            "conversation_id": "00565704-a7ae-4278-bd14-ca598fedxxxx",
+            "is_archived": false,
+            "moderation_results": [],
+            "safe_urls": [],
+            "content": [
+                {
+                    "author": "User",
+                    "create_time": 1703074882.685243,
+                    "text": "How old is Donald Triump",
+                    "status": "finished_successfully",
+                    "id": "aaa2ea97-d8bd-4b93-bff9-09d1a684xxxx"
+                },
+                {
+                    "author": "ChatGPT",
+                    "create_time": 1703074884.019059,
+                    "text": "Donald Trump was born on June 14, 1946, so his age depends on the current date. If you tell me today's date, I can calculate his age for you!",
+                    "status": "finished_successfully",
+                    "id": "f18a446d-8843-4433-acf7-79cb01a8xxxx"
+                }
+            ]
+        }
+        ```
+        """
+        resp = self.session.get(
+            self.conversation_manipulation_endpoint
+            % {"conversation_id": conversation_id}
+        )
+        from_chatgpt = utils.is_json(resp, "chat history")
+        if all:
+            return from_chatgpt
+        # title
+        # create_time
+        # update_time
+        # mapping >
+        #
+        # mapping > message_id > message > content > parts[0]
+        #                                > create_time
+        #                                > status
+        new_resp = {
+            "title": from_chatgpt["title"],
+            "create_time": from_chatgpt["create_time"],
+            "update_time": from_chatgpt["update_time"],
+            "current_node": from_chatgpt["current_node"],
+            "conversation_id": from_chatgpt["conversation_id"],
+            "is_archived": from_chatgpt["is_archived"],
+            "moderation_results": from_chatgpt["moderation_results"],
+            "safe_urls": from_chatgpt["safe_urls"],
+            "content": [],
+        }
+        for count, entry in enumerate(list(from_chatgpt["mapping"].keys())[2:]):
+            in_need = from_chatgpt["mapping"][entry]["message"]
+            new_resp["content"].append(
+                {
+                    "author": "ChatGPT" if count % 2 else "User",
+                    "create_time": in_need["create_time"],
+                    "text": in_need["content"]["parts"][0],
+                    "status": in_need["status"],
+                    "id": entry,
+                }
+            )
+        return new_resp
+
+    def rename_conversation(self, conversation_id: str, title: str) -> dict:
+        """Renames conversation title
+
+        Args:
+            conversation_id (str): Conversation ID
+            title (str): New conversation title
+
+        Returns:
+            dict: Success report
+        ```json
+        {
+            "success": true
+        }
+        ```
+        """
+        resp = self.session.patch(
+            self.conversation_manipulation_endpoint
+            % {"conversation_id": conversation_id},
+            json={"title": title},
+        )
+        return utils.is_json(resp, "rename conversation")
+
+    def archive_conversation(
+        self, conversation_id: str, is_archived: bool = True
+    ) -> dict:
+        """Archives a particular conversation
+
+        Args:
+            conversation_id (str): Conversation ID
+            is_archived (bool): Archive (True) or Unarchive (False). Defaults to `True`.
+
+        Returns:
+            dict: Success report
+        ```json
+        {
+            "success": true
+        }
+        ```
+        """
+        resp = self.session.patch(
+            self.conversation_manipulation_endpoint
+            % {
+                "conversation_id": conversation_id,
+            },
+            json={
+                "is_archived": is_archived,
+            },
+        )
+        return utils.is_json(resp, "archive conversation")
+
+    def share_conversation(
+        self,
+        conversation_id: str,
+        is_anonymous: bool = True,
+        is_public: bool = True,
+        is_visible: bool = True,
+    ) -> dict:
+        """Generate link for sharing conversation
+
+        Args:
+            conversation_id (str): Conversation ID
+            is_anonymous (bool, optional): Hide your identity in the share. Defaults to True.
+
+        Returns:
+            dict: Success report
+        ```json
+        {
+            "share_id": "a71119f8-9a49-4c1d-b18f-d698313bxxxx",
+            "share_url": "https://chat.openai.com/share/a71119f8-9a49-4c1d-b18f-d698313bxxxx",
+            "title": "Trump's Age Calculator",
+            "is_public": false,
+            "is_visible": true,
+            "is_anonymous": true,
+            "highlighted_message_id": null,
+            "current_node_id": "f18a446d-8843-4433-acf7-79cb01a8xxxx",
+            "already_exists": false,
+            "moderation_state": {
+                "has_been_moderated": false,
+                "has_been_blocked": false,
+                "has_been_accepted": false,
+                "has_been_auto_blocked": false,
+                "has_been_auto_moderated": false
+            }
+        }
+        ```
+        """
+        resp = self.session.post(
+            self.share_conversation_endpoint,
+            json={
+                "conversation_id": conversation_id,
+                "current_node_id": self.chat_history(conversation_id, True)[
+                    "current_node"
+                ],
+                "is_anonymous": is_anonymous,
+            },
+        )
+        resp_1 = utils.is_json(resp, "share link")
+        resp_2 = self.session.patch(
+            self.share_conversation_patch_endpoint % dict(share_id=resp_1["share_id"]),
+            json={
+                "share_id": resp_1["share_id"],
+                "highlighted_message_id": resp_1["highlighted_message_id"],
+                "title": resp_1["title"],
+                "is_public": is_public,
+                "is_visible": is_visible,
+                "is_anonymous": is_anonymous,
+            },
+        )
+        resp_1.update(utils.is_json(resp_2, "patch share link"))
+        return resp_1
+
+    def shared_conversations(self, order: str = "created") -> dict:
+        """Get previously shared conversations
+
+        Args:
+            order (str, optional): Sorting param. Defaults to 'created'.
+
+        Returns:
+            dict: Conversations shared
+        ```json
+        {
+            "items": [
+                {
+                    "id": "57cf604b-37d6-4910-a47c-41xxxxxxxxxx",
+                    "title": "Obama's Age: 62 Years",
+                    "create_time": "2023-12-20T19:34:47.883282+00:00",
+                    "update_time": "2023-12-20T19:44:35+00:00",
+                    "mapping": null,
+                    "current_node": null,
+                    "conversation_template_id": null,
+                    "gizmo_id": null,
+                    "is_archived": null,
+                    "workspace_id": null,
+                    "conversation_id": "f8968cc4-8a48-4771-b16c-58xxxxxxxxxx",
+                    "url": "https://chat.openai.com/share/57cf604b-37d6-4910-a47c-41xxxxxxxxxx"
+                }
+            ],
+            "total": 10,
+            "limit": 50,
+            "offset": 0,
+            "has_missing_conversations": false
+        }
+        ```
+        """
+        resp = self.session.get(
+            self.shared_conversations_endpoint,
+        )
+        shareds = utils.is_json(resp)
+        for index, entry in enumerate(shareds["items"]):
+            # appends view url to each conversation
+            shareds["items"][index][
+                "url"
+            ] = self.shared_conversation_view_endpoint % dict(share_id=entry["id"])
+        return shareds
+
+    def stop_sharing_conversation(self, share_id: str) -> dict:
+        """Deletes sharing link
+
+        Args:
+            share_id (str): Shared conversation ID
+
+        Returns:
+            dict: Success report
+        """
+        resp = self.session.delete(
+            self.stop_sharing_conversation_endpoint % dict(share_id=share_id)
+        )
+        return utils.is_json(resp, "delete shared conversation")
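Editorial note (not part of the patch): a sketch of how the conversation-management methods introduced above chain together. It assumes `bot` is an initialized `ChatGPT` instance with at least one prior conversation; all IDs come from the library's own return values.

```python
# Walk one conversation through the new management methods (illustrative only).
convo = bot.previous_conversations(index=1)    # 1-based; 0 is reserved for new chats
convo_id = convo["id"]

history = bot.chat_history(convo_id)           # trimmed prompt/response pairs
bot.rename_conversation(convo_id, "Reviewed conversation")

share = bot.share_conversation(convo_id)       # returns share_id / share_url
print(share["share_url"])
bot.stop_sharing_conversation(share["share_id"])

bot.archive_conversation(convo_id)             # or bot.delete_conversation(convo_id)
```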
diff --git a/WebChatGPT/utils.py b/WebChatGPT/utils.py
index 22b7249..7aac0ac 100644
--- a/WebChatGPT/utils.py
+++ b/WebChatGPT/utils.py
@@ -2,6 +2,7 @@
 import json
 import logging
 import os
+from uuid import uuid4
 
 headers = request_headers = {
     "Accept": "text/event-stream",
@@ -45,8 +46,8 @@ def main(*args, **kwargs):
         return decorator
 
 #
-def get_request_headers(auth: str) -> dict:
-    """Generate Http request headers
+def get_request_headers_and_append_auth(self) -> dict:
+    """Generate Http request headers & append OAuth
 
     Args:
         auth (str): OpenAI's authorization header
 
     Returns:
         dict: Request headers
     """
+    resp = self.session.get(
+        "https://chat.openai.com/api/auth/session",
+        headers=request_headers,
+    )
+    if not resp.ok:
+        raise Exception("Failed to fetch Auth value, supply path to correct cookies.")
+    self.auth = resp.json()
     auth_template = headers["Authorization"]
-    headers["Authorization"] = auth_template % {"value": auth}
+    headers["Authorization"] = auth_template % {"value": self.auth["accessToken"]}
     return headers
 
 
@@ -138,7 +146,6 @@ def generate_telemetry_payload(self: object):
             "library": {"name": "analytics.js", "version": "npm:next-1.56.0"},
         },
         # "messageId": "ajs-next-3154852a6626ae6a48a031e2506fexxx",
-        # 1a50c897d53bfb315eb7270979e9726e
         "_metadata": {
             "bundled": ["Segment.io"],
             "unbundled": [],
@@ -170,17 +177,28 @@ def generate_payload(self: object, prompt: str) -> dict:
                 "metadata": {},
             }
         ],
-        "conversation_id": self.conversation_metadata["id"],
+        # "conversation_id": self.conversation_metadata["id"],
         # "parent_message_id": "5b45a98c-0871-48ed-895b-f36f188cxxxx",
         "model": self.model,
         "timezone_offset_min": -180,
-        "suggestions": [] + self.suggestions,
-        "history_and_training_disabled": False,
+        "suggestions": [],
+        "history_and_training_disabled": self.disable_history_and_training,
         "arkose_token": None,
         "conversation_mode": {"kind": "primary_assistant"},
         "force_paragen": False,
         "force_rate_limit": False,
     }
+    if self.current_conversation_id:
+        # Continuing conversation
+        payload_template["conversation_id"] = self.current_conversation_id
+    else:
+        # Create new conversation
+        payload_template["messages"][0]["id"] = str(uuid4())
+        payload_template["suggestions"] = [
+            prompt["prompt"] for prompt in self.prompt_library()["items"]
+        ]
+
+    # print(json.dumps( payload_template,indent=4,))
     return payload_template
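Editorial note (not part of the patch): the auto-fetch idea in `get_request_headers_and_append_auth` can be sketched in isolation roughly as below. This is illustrative only; it mirrors the session endpoint and `accessToken` key used in the patch, and assumes `cookies.json` is a placeholder for the browser-exported cookie file (a list of objects with `name`/`value` fields).

```python
# Standalone illustration of deriving the bearer token from cookies alone.
import json

import requests

session = requests.Session()
with open("cookies.json") as fh:
    for cookie in json.load(fh):
        session.cookies.set(cookie["name"], cookie["value"])

resp = session.get("https://chat.openai.com/api/auth/session")
resp.raise_for_status()
access_token = resp.json()["accessToken"]
session.headers["Authorization"] = f"Bearer {access_token}"
```

This is why the `-A/--auth` option and the `openai_authorization` variable can be dropped everywhere in the docs below.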
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 9e02ea8..ee52a07 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -4,7 +4,20 @@ Initial Release
 
 ## v0.0.2
 
-**Whats New?**
+**What's New?**
 
 - General code improvement
-- Request timeout - 30
\ No newline at end of file
+- Request timeout - 30
+
+## v0.1.0
+
+**What's New?**
+
+- Auto-fetch OAuth value
+- Chat History
+- Rename conversation
+- Share conversation
+- Delete conversation
+- Retrieve shared conversations
+- Stop sharing conversation
+- Archive conversation
\ No newline at end of file
diff --git a/docs/DEVELOPER.md b/docs/DEVELOPER.md
index 08ce4df..d8d5f97 100644
--- a/docs/DEVELOPER.md
+++ b/docs/DEVELOPER.md
@@ -3,7 +3,7 @@
 ```python
 from WebChatGPT import ChatGPT
 bot = ChatGPT(
-    "",
+    ""
 )
 
 print(bot.chat(''))
@@ -31,7 +31,6 @@
 dotenv.load_dotenv('.env')
 
 from_env = lambda key: os.environ.get(key)
 
 bot = ChatGPT(
-    from_env('openai_authorization',),
     from_env('openai_cookie_file')
 )
@@ -54,7 +53,6 @@ Furtherly, you can retrieve all the response returned in `dict`
 ```python
 from WebChatGPT import ChatGPT
 bot = ChatGPT(
-    "",
     ""
 )
 response = bot.ask('')
@@ -460,9 +458,39 @@ Output :
 
+Other methods include:
+ - Chat History :
+   ```py
+   bot.chat_history(conversation_id : str)
+   ```
+ - Rename conversation :
+   ```py
+   bot.rename_conversation(conversation_id : str, title : str)
+   ```
+ - Share conversation :
+   ```py
+   bot.share_conversation(conversation_id : str)
+   ```
+ - Delete conversation :
+   ```py
+   bot.delete_conversation(conversation_id : str)
+   ```
+ - Retrieve shared conversations :
+   ```py
+   bot.shared_conversations()
+   ```
+ - Stop sharing conversation :
+   ```py
+   bot.stop_sharing_conversation(share_id : str)
+   ```
+ - Archive conversation :
+   ```py
+   bot.archive_conversation(conversation_id : str)
+   ```
+
 For further info, purpose to review the [flow of operations.](operations_flow.md)
 
-> **Note** As at **v0.0.2**, `streaming response` is not implemented. Consider giving it a [PR](https://github.com/Simatwa/WebChatGPT/pulls).
+> **Note** As of **v0.1.0**, `streaming response` is not implemented. Consider giving it a [PR](https://github.com/Simatwa/WebChatGPT/pulls).
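Editorial note (not part of the patch): DEVELOPER.md only lists the new calls, so here is a hedged sketch of consuming `chat_history`'s trimmed output. The keys follow the docstring example in the main.py diff above; `bot` is an initialized `ChatGPT` instance and the conversation ID is a placeholder.

```python
# Print a readable transcript from the trimmed chat_history() payload.
history = bot.chat_history("00565704-a7ae-4278-bd14-ca598fedxxxx")
print(f"# {history['title']}")
for entry in history["content"]:
    print(f"{entry['author']}: {entry['text']}")
```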

 CHANGELOG
diff --git a/docs/README.md b/docs/README.md
index bfdb2ac..f20290e 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -25,7 +25,6 @@ Unlike the [official Openai library](https://github.com/openai/openai-python), t
 ```python
 from WebChatGPT import ChatGPT
 bot = ChatGPT(
-    "",
     ""
 )
 response = bot.ask('')
@@ -62,7 +61,6 @@ In terminal:
 
 - [x] Python>=3.10 Installed
 - [x] Chrome or Firefox browser
-- [x] [Http-Tracker](https://github.com/venukbh/http-tracker) extension installed.
 - [x] [export-cookie-for-puppeteer](https://github.com/ktty1220/export-cookie-for-puppeteer) extension installed.
 
 ## Installation & usage
@@ -89,21 +87,15 @@ pip install .
 
 The script utilizes [HTTP Cookies](https://en.wikipedia.org/wiki/HTTP_cookie) and [OAuth](https://en.wikipedia.org/wiki/OAuth) to justify the REST-API requests at [Openai](https://openai.com).
 
-In order to do that, we will use the [Http-Tracker](https://github.com/venukbh/http-tracker) extension to harvest the `Oauth` and
-[export-cookie-for-puppeteer](https://github.com/ktty1220/export-cookie-for-puppeteer) extension to extract the cookies.
+In order to do that, we will use the [export-cookie-for-puppeteer](https://github.com/ktty1220/export-cookie-for-puppeteer) extension to extract the cookies.
 
 ### Procedure
 
 1. Login to https://chat.openai.com
-2. Upon successfull login, use **Export cookie JSON File Puppeteer** to download cookies.
-3. Launch the **Http-Tracker** extension.
-4. Back to ChatGPT, make a new conversation and then have a chat with it.
-5. Back to Http-Tracker window, locate and click on the url row having `https://chat.openai.com/backend-api/conversation` to toggle a dropdown showing the http requests details.
-6. On the *Request Details Table*, locate a Header having key `Authorization` and then copy it's corresponding value without the `Bearer` string and then paste it somewhere.
-7. On your current directory of your **terminal**,create a [`.env`](https://github.com/Simatwa/WebChatGPT/blob/main/env) file and then save the contents in the format :
+2. Upon successful login, use **Export cookie JSON File Puppeteer** to export cookies.
+3. In the current directory of your **terminal**, create a [`.env`](https://github.com/Simatwa/WebChatGPT/blob/main/env) file and save the path to the cookie file in the format:
 
 ```
-openai_authorization=
 openai_cookie_file=
 ```
@@ -172,7 +164,6 @@ Usage: webchatgpt generate [OPTIONS]
 
   Generate a quick response with ChatGPT
 
 Options:
-  -A, --auth TEXT         OpenAI's authorization value
   -C, --cookie-path PATH  Path to .json file containing cookies for
                           `chat.openai.com`
   -M, --model TEXT        ChatGPT's model to be used
@@ -198,7 +189,6 @@ Usage: webchatgpt interactive [OPTIONS]
 
   Chat with ChatGPT interactively
 
 Options:
-  -A, --auth TEXT         OpenAI's authorization value
  -C, --cookie-path PATH  Path to .json file containing cookies for
                          `chat.openai.com`
  -M, --model TEXT        ChatGPT's model to be used
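Editorial note (not part of the patch): to make the slimmed-down README procedure concrete, a minimal end-to-end sketch. It assumes a `.env` file containing `openai_cookie_file=/path/to/cookies.json` (the path is a placeholder) and follows the dotenv pattern already shown in DEVELOPER.md.

```python
# Cookie-only setup mirroring the updated README procedure (illustrative).
import os

import dotenv

from WebChatGPT import ChatGPT

dotenv.load_dotenv(".env")  # expects: openai_cookie_file=/path/to/cookies.json

bot = ChatGPT(os.environ.get("openai_cookie_file"))
print(bot.chat("Hello"))
```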