Skip to content

Commit

Permalink
Release 5.1.0, New function: OpenAI proxy
Browse files Browse the repository at this point in the history
  • Loading branch information
Michael-X-Net committed Mar 8, 2023
1 parent 3960f27 commit 7e9e672
Show file tree
Hide file tree
Showing 11 changed files with 314 additions and 12 deletions.
10 changes: 8 additions & 2 deletions code/default/launcher/web_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,7 @@ def do_POST(self):
self.postvars = json.loads(content)
else:
self.postvars = {}
content = b''
except Exception as e:
xlog.exception("do_POST %s except:%r", self.path, e)
self.postvars = {}
Expand All @@ -136,8 +137,13 @@ def do_POST(self):
elif url_path == "/set_proxy_applist":
return self.set_proxy_applist()

self.send_not_found()
xlog.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)
elif url_path.startswith("/openai/"):
return module_init.proc_handler["x_tunnel"]["imp"].local.openai_handler.handle_openai(
"POST", url_path, self.headers, content, self.connection)

else:
self.send_not_found()
xlog.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)

def do_GET(self):
# self.headers = utils.to_str(self.headers)
Expand Down
2 changes: 1 addition & 1 deletion code/default/version.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
5.0.8
5.1.0
6 changes: 6 additions & 0 deletions code/default/x_tunnel/lang/zh_CN/LC_MESSAGES/messages.po
Original file line number Diff line number Diff line change
Expand Up @@ -437,3 +437,9 @@ msgstr "指导"

msgid " to troubleshoot."
msgstr "去解决。"

msgid "ChatGPT Manual"
msgstr "ChatGPT指南"

msgid "https://github.com/XX-net/XX-Net/wiki/ChatGPT_EN"
msgstr "https://github.com/XX-net/XX-Net/wiki/ChatGPT_CN"
2 changes: 2 additions & 0 deletions code/default/x_tunnel/local/global_var.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@
server_port = 0
selectable = []
balance = 0
openai_balance = 0
openai_proxies = []
tls_relays = {}

stat = {
Expand Down
92 changes: 92 additions & 0 deletions code/default/x_tunnel/local/openai_handler.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import random
import json
import base64
import time
import zlib

import utils

from . import global_var as g
from . import front_dispatcher
from . import proxy_session

from xlog import getLogger
xlog = getLogger("x_tunnel")

# Estimated price per ChatGPT token, used to decrement the locally cached
# balance after each successful completion — TODO confirm against server billing.
openai_chat_token_price = 0.000002
# Currently selected OpenAI proxy host; picked lazily by get_openai_proxy().
host = None

# NOTE(review): a zlib decompress object is stateful and valid for a single
# gzip stream only — sharing one module-level instance across responses looks
# unsafe if more than one gzip response is ever handled; confirm.
gzip_decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)


def get_auth_str():
    """Build the ``Authorization`` header value for the OpenAI proxy.

    The configured x-tunnel account credentials are serialized to JSON,
    base64-encoded, and returned as a ``Bearer`` token string.
    """
    credentials = {
        "login_account": g.config.login_account,
        "login_password": g.config.login_password
    }
    encoded = base64.b64encode(utils.to_bytes(json.dumps(credentials)))
    return "Bearer " + utils.to_str(encoded)


auth_str = None


def get_openai_proxy(get_next_one=False):
    """Pick an OpenAI proxy host from the server-provided list.

    Refreshes the proxy list (via request_balance) on first use or when
    get_next_one is True.  Returns a host string on success, or None when
    no account is configured or no proxy is currently available (callers
    only test truthiness, so the former mixed False/None returns are
    unified to None).
    """
    global host
    if get_next_one or not host:

        # Fetching the proxy list requires x-tunnel account credentials.
        if not (g.config.login_account and g.config.login_password):
            return None

        # Retry a few times, but stop as soon as one request succeeds —
        # the original loop kept re-requesting even after success.
        for _ in range(3):
            res, reason = proxy_session.request_balance(
                g.config.login_account, g.config.login_password)
            if res:
                break
            xlog.warn("x-tunnel request_balance fail when create_conn:%s", reason)
            time.sleep(1)

    # Guard on every call: the list may be (or have become) empty, and
    # random.choice raises IndexError on an empty sequence — previously
    # the cached-host path reached random.choice unguarded.
    if not g.openai_proxies:
        return None

    host = random.choice(g.openai_proxies)
    return host


def handle_openai(method, path, headers, req_body, sock):
    """Forward an ``/openai/...`` API request to a remote OpenAI proxy and
    relay the upstream response back to the client socket.

    method   -- HTTP method string (e.g. "POST").
    path     -- request path starting with "/openai/"; the "/openai" prefix
                is stripped before forwarding ("/openai/v1/..." -> "/v1/...").
    headers  -- request headers (dict-like); rewritten with our auth token.
    req_body -- raw request body bytes, forwarded unchanged.
    sock     -- client connection the response is written to.
    """
    global auth_str
    if not auth_str:
        auth_str = get_auth_str()

    host = get_openai_proxy()
    if not host:
        return sock.send(b'HTTP/1.1 401 Fail\r\n\r\n')

    path = utils.to_str(path[7:])  # drop the "/openai" prefix (7 chars)
    headers = utils.to_str(headers)
    # Replace the client's credential/routing headers with our own.
    headers["Authorization"] = auth_str
    # pop() with a default never raises, unlike the former `del headers[...]`
    # which raised KeyError when the header was absent.
    headers.pop("Host", None)
    headers.pop("Accept-Encoding", None)

    content, status, response = front_dispatcher.request(
        method, host, path=path, headers=headers, data=req_body)

    if status == 200:
        # Best effort: parse the completion to estimate token cost and keep
        # the locally displayed balance roughly in sync.  Failures here must
        # not break the relay, hence the broad except + log.
        try:
            if response.headers.get(b"Content-Encoding") == b"gzip":
                # Fresh decompressor per response: zlib decompress objects
                # are single-stream and must not be shared between requests.
                data = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(content)
            else:
                data = content

            dat = json.loads(data)
            total_tokens = dat["usage"]["total_tokens"]
            cost = total_tokens * openai_chat_token_price
            g.openai_balance -= cost
        except Exception as e1:
            xlog.exception("cal tokens err:%r", e1)

    # Relay the status line, headers and body back to the client.
    # NOTE(review): the reason phrase is "OK" even for error statuses; most
    # clients ignore it, but confirm nothing depends on it.
    sock.send(b'HTTP/1.1 %d OK\r\n' % (status,))
    for key, value in response.headers.items():
        sock.send(b'%s: %s\r\n' % (key, value))
    sock.send(b'\r\n')
    sock.send(content)
13 changes: 10 additions & 3 deletions code/default/x_tunnel/local/proxy_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@

from . import global_var as g
from . import proxy_session
from . import openai_handler


def netloc_to_host_port(netloc, default_port=80):
Expand Down Expand Up @@ -351,16 +352,22 @@ def http_handler(self, first_char):
lines = header_block.split(b"\r\n")
path = url
host = None
headers = {}
for line in lines:
key, _, value = line.rpartition(b":")
if key.lower == b"host":
key, _, value = line.partition(b":")
headers[key] = value
if key.lower() == b"host":
host, port = netloc_to_host_port(value)
break
if host is None:
xlog.warn("http proxy host can't parsed. %s %s", req_line, header_block)
self.connection.send(b'HTTP/1.1 500 Fail\r\n\r\n')
return

if url.startswith(b"/openai/"):
content_length = int(headers.get(b"Content-Length", 0))
req_body = self.read_bytes(content_length)
return openai_handler.handle_openai(method, url, headers, req_body, self.connection)

sock = self.connection
conn_id = proxy_session.create_conn(sock, host, port)
if not conn_id:
Expand Down
2 changes: 2 additions & 0 deletions code/default/x_tunnel/local/proxy_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -976,6 +976,8 @@ def request_balance(account=None, password=None, is_register=False, update_serve
g.promote_code = utils.to_str(info["promote_code"])
g.promoter = info["promoter"]
g.balance = info["balance"]
g.openai_balance = info["openai_balance"]
g.openai_proxies = info["openai_proxies"]
g.tls_relays = info["tls_relays"]
if g.tls_relay_front and g.tls_relays.get("ips"):
g.tls_relay_front.set_ips(g.tls_relays["ips"])
Expand Down
7 changes: 5 additions & 2 deletions code/default/x_tunnel/local/web_control.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,7 @@ def req_info_handler(self):
"paypal_button_id": g.paypal_button_id,
"plans": g.plans,
"balance": "%f" % (g.balance),
"openai_balance": float(g.openai_balance),
"quota": "%d" % (g.quota),
"quota_list": g.quota_list,
"traffic": g.session.traffic,
Expand Down Expand Up @@ -282,7 +283,8 @@ def req_token_login_handler(self):
g.config.save()
res_arr = {
"res": "success",
"balance": float(g.balance)
"balance": float(g.balance),
"openai_balance": float(g.openai_balance)
}
g.last_refresh_time = time.time()
g.session.start()
Expand Down Expand Up @@ -335,7 +337,8 @@ def req_login_handler(self):
g.config.save()
res_arr = {
"res": "success",
"balance": float(g.balance)
"balance": float(g.balance),
"openai_balance": float(g.openai_balance)
}
g.last_refresh_time = time.time()
g.session.start()
Expand Down
167 changes: 167 additions & 0 deletions code/default/x_tunnel/web_ui/chatgpt.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,167 @@
<div >

<div class="row-fluid">
<div class="span1"><strong>{{ _( "Help" ) }}</strong></div> <!-- .span4 -->
<div class="span7" id="about_current_version"> <a href="{{ _( "https://github.com/XX-net/XX-Net/wiki/ChatGPT_EN" ) }}" target="_blank">{{ _( "ChatGPT Manual" ) }}</a></div> <!-- .span8 -->
</div> <!-- .div.fluid -->

<div class="row-fluid" id="output-area">
<div id="output" class="span12"></div> <!-- #log -->
</div>

<div class="row-fluid" id="input-area">
<div class="span10" id="input-box">
<textarea id="prompt" rows="2"></textarea>
</div> <!-- .span10 -->
<div class="span2">
<button class="btn btn-primary btn-block ask-button" id="ask" type="submit">
<div id="submit-text" >
{{ _( "Submit" ) }}
</div>
<div id="submit-loading" >
<div class="lds-facebook"> <div></div><div></div><div></div></div>
</div>
</button>

</div> <!-- .span12 -->
</div> <!-- .row-fluid -->

</div> <!-- #log-container -->

<script type="text/javascript">
// Set the page title and hide the loading spinner until a request is in flight.
title('{{ _("ChatGPT") }}');
$('#submit-loading').addClass('hide');
</script>

<script>
// Append one chat message to the #output pane.
// Newlines become <br>, and each ``` fenced pair becomes a <pre> block by
// alternating closing/opening replacements.
// NOTE(review): text is inserted as raw HTML without escaping, so prompt or
// API output can inject markup (XSS) — confirm this is acceptable for a
// local-only control UI.
function append_output(text, text_class) {
    text = text.replace(/\n/g, "<br>");
    while (text.includes("```")){
        text = text.replace("```", "</p><pre>");
        text = text.replace("```", '</pre><p class="' + text_class + '">');
    }
    var newlines = document.createDocumentFragment();
    var template = '<p class="%s">%s</p>\n';
    // NOTE(review): String.prototype.format is not standard JavaScript —
    // presumably provided by the app's shared scripts; verify.
    var newline = $(template.format(text_class, text));
    $(newlines).append(newline);
    $('#output').append(newlines);
}

// Submit the prompt to the local /openai/ proxy endpoint and render the
// returned chat completion(s) into the output pane.
$('#ask').click(function () {
    var prompt = $('#prompt').val();
    console.log(prompt);
    // Echo the user's prompt immediately, then clear the input box.
    append_output(prompt, 'prompt_text');
    $('#prompt').val('');

    const req = {
        model: "gpt-3.5-turbo",
        // Submit only the role and content of the messages, provide the previous messages as well for context
        messages: [
            {
                role: "user",
                content: prompt
            }
        ]
    };
    // Swap the Submit label for the loading animation while waiting.
    $('#submit-text').addClass('hide');
    $('#submit-loading').removeClass('hide');

    $.ajax({
        type: 'POST',
        url: '/openai/v1/chat/completions',
        data: JSON.stringify(req),
        dataType: 'JSON',
        success: function (result) {
            // Render each returned choice and keep the pane scrolled to bottom.
            for (const choice of result['choices']) {
                const message = choice["message"];
                const content = message["content"];
                append_output(content, "complete_text");
            }
            $('#output').scrollTop($('#output')[0].scrollHeight);

            $('#submit-text').removeClass('hide');
            $('#submit-loading').addClass('hide');
        },
        error: function () {
            // Restore the button state and show a toast on failure.
            tip('{{ _( "Call API failed." ) }}', 'error');
            $('#submit-text').removeClass('hide');
            $('#submit-loading').addClass('hide');
        }
    });
});
</script>


<style type="text/css">
#output-area {
}
#output {
background-color: #f4f6f6;
border: 2px solid #d5dbdb;
border-radius: 6px;
color: #34495e;
font-size: 14.994px;
line-height: 24px;
max-width: 100%;
overflow-y: auto;
padding: 5px 11px;
text-indent: 0;
height: calc(100vh - 290px);
}
#input-area {
}
#input-box {
margin-top: 20px;
}
div#content textarea {
height: 60px;
}
.prompt_text {
color: blue;
}
.complete_text {
color: green;
}
.ask-button {
height: 80px;
}
#submit-loading{
margin-top: -10px;
}
.lds-facebook {
display: inline-block;
position: relative;
width: 80px;
height: 80px;
}
.lds-facebook div {
display: inline-block;
position: absolute;
left: 8px;
width: 16px;
background: #fff;
animation: lds-facebook 1.2s cubic-bezier(0, 0.5, 0.5, 1) infinite;
}
.lds-facebook div:nth-child(1) {
left: 8px;
animation-delay: -0.24s;
}
.lds-facebook div:nth-child(2) {
left: 32px;
animation-delay: -0.12s;
}
.lds-facebook div:nth-child(3) {
left: 56px;
animation-delay: 0;
}
@keyframes lds-facebook {
0% {
top: 8px;
height: 64px;
}
50%, 100% {
top: 24px;
height: 32px;
}
}
</style>
Loading

0 comments on commit 7e9e672

Please sign in to comment.