catch up latest commit #3689

Merged: 12 commits, Dec 28, 2023
7 changes: 4 additions & 3 deletions README.md
@@ -61,10 +61,11 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4

## What's New

- - 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
- - 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
- - 🚀 v2.8 now we have a client that runs across all platforms!
+ - 🚀 v2.10.1 support Google Gemini Pro model.
  - 🚀 v2.9.11 you can use azure endpoint now.
+ - 🚀 v2.8 now we have a client that runs across all platforms!
+ - 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
+ - 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).

## Main Features

6 changes: 6 additions & 0 deletions app/api/common.ts
@@ -122,6 +122,12 @@ export async function requestOpenai(req: NextRequest) {
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");

+ // The latest OpenAI API forces content-encoding to "br" (brotli) in JSON responses.
+ // When streaming is disabled, we need to remove the content-encoding header,
+ // because Vercel re-compresses the response with gzip; if the stale header is kept,
+ // the browser will try to decode the body as brotli and fail.
+ newHeaders.delete("content-encoding");

return new Response(res.body, {
status: res.status,
statusText: res.statusText,
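To see the fix outside the diff, here is a minimal standalone sketch of the same idea, assuming a generic fetch-capable runtime (the helper name and shape are hypothetical, not part of this PR): when a proxy forwards an upstream body that the hosting platform will re-compress, stale encoding headers must be dropped so the browser decodes the final bytes correctly.

```ts
// Hypothetical proxy helper: forward an upstream response but drop
// encoding headers, since the platform (e.g. Vercel) may gzip the body
// again and a leftover "content-encoding: br" would mislead the browser.
async function proxyJson(upstreamUrl: string): Promise<Response> {
  const res = await fetch(upstreamUrl);
  const headers = new Headers(res.headers);
  headers.delete("content-encoding"); // body may be re-encoded downstream
  headers.delete("content-length"); // length changes once re-encoded
  return new Response(res.body, {
    status: res.status,
    statusText: res.statusText,
    headers,
  });
}
```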
1 change: 1 addition & 0 deletions app/client/api.ts
@@ -144,6 +144,7 @@ export function getHeaders() {
const headers: Record<string, string> = {
"Content-Type": "application/json",
"x-requested-with": "XMLHttpRequest",
"Accept": "application/json",
};
const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
const isGoogle = modelConfig.model === "gemini-pro";
128 changes: 48 additions & 80 deletions app/client/platforms/google.ts
@@ -20,6 +20,7 @@ export class GeminiProApi implements LLMApi {
);
}
async chat(options: ChatOptions): Promise<void> {
+ const apiClient = this;
const messages = options.messages.map((v) => ({
role: v.role.replace("assistant", "model").replace("system", "user"),
parts: [{ text: v.content }],
@@ -61,8 +61,7 @@

console.log("[Request] google payload: ", requestPayload);

- // todo: support stream later
- const shouldStream = false;
+ const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
@@ -82,13 +82,21 @@
if (shouldStream) {
let responseText = "";
let remainText = "";
+ let streamChatPath = chatPath.replace(
+   "generateContent",
+   "streamGenerateContent",
+ );
let finished = false;
+ const finish = () => {
+   finished = true;
+   options.onFinish(responseText + remainText);
+ };

// animate response to make it look smooth
function animateResponseText() {
if (finished || controller.signal.aborted) {
responseText += remainText;
console.log("[Response Animation] finished");
finish();
return;
}

@@ -105,88 +113,41 @@

// start animation
animateResponseText();

- const finish = () => {
-   if (!finished) {
-     finished = true;
-     options.onFinish(responseText + remainText);
-   }
- };
-
- controller.signal.onabort = finish;
-
- fetchEventSource(chatPath, {
-   ...chatPayload,
-   async onopen(res) {
-     clearTimeout(requestTimeoutId);
-     const contentType = res.headers.get("content-type");
-     console.log(
-       "[OpenAI] request response content type: ",
-       contentType,
-     );
-
-     if (contentType?.startsWith("text/plain")) {
-       responseText = await res.clone().text();
-       return finish();
-     }
-
-     if (
-       !res.ok ||
-       !res.headers
-         .get("content-type")
-         ?.startsWith(EventStreamContentType) ||
-       res.status !== 200
-     ) {
-       const responseTexts = [responseText];
-       let extraInfo = await res.clone().text();
-       try {
-         const resJson = await res.clone().json();
-         extraInfo = prettyObject(resJson);
-       } catch {}
-
-       if (res.status === 401) {
-         responseTexts.push(Locale.Error.Unauthorized);
-       }
-
-       if (extraInfo) {
-         responseTexts.push(extraInfo);
-       }
-
-       responseText = responseTexts.join("\n\n");
-
-       return finish();
-     }
-   },
-   onmessage(msg) {
-     if (msg.data === "[DONE]" || finished) {
-       return finish();
-     }
-     const text = msg.data;
-     try {
-       const json = JSON.parse(text) as {
-         choices: Array<{
-           delta: {
-             content: string;
-           };
-         }>;
-       };
-       const delta = json.choices[0]?.delta?.content;
-       if (delta) {
-         remainText += delta;
-       }
-     } catch (e) {
-       console.error("[Request] parse error", text);
-     }
-   },
-   onclose() {
-     finish();
-   },
-   onerror(e) {
-     options.onError?.(e);
-     throw e;
-   },
-   openWhenHidden: true,
- });
+ fetch(streamChatPath, chatPayload)
+   .then((response) => {
+     const reader = response?.body?.getReader();
+     const decoder = new TextDecoder();
+     let partialData = "";
+
+     return reader?.read().then(function processText({
+       done,
+       value,
+     }): Promise<any> {
+       if (done) {
+         console.log("Stream complete");
+         // options.onFinish(responseText + remainText);
+         finished = true;
+         return Promise.resolve();
+       }
+
+       partialData += decoder.decode(value, { stream: true });
+
+       try {
+         let data = JSON.parse(ensureProperEnding(partialData));
+         console.log(data);
+         let fetchText = apiClient.extractMessage(data[data.length - 1]);
+         console.log("[Response Animation] fetchText: ", fetchText);
+         remainText += fetchText;
+       } catch (error) {
+         // skip error message when parsing json
+       }
+
+       return reader.read().then(processText);
+     });
+   })
+   .catch((error) => {
+     console.error("Error:", error);
+   });
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
@@ -220,3 +181,10 @@
return "/api/google/" + path;
}
}

+ function ensureProperEnding(str: string) {
+   if (str.startsWith("[") && !str.endsWith("]")) {
+     return str + "]";
+   }
+   return str;
+ }
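To make the new streaming path concrete: unlike the OpenAI client, which consumes SSE events, Gemini's `streamGenerateContent` endpoint (derived above by rewriting the `generateContent` URL) returns a single JSON array that grows chunk by chunk, so the accumulated buffer is usually an unterminated array. `ensureProperEnding` patches on the missing `]` so each prefix can be parsed and the newest element extracted. A minimal standalone sketch of that idea, with made-up chunk data:

```ts
// Simulated chunks of a streamed JSON array, roughly as
// streamGenerateContent might deliver them; real chunks can split
// anywhere in the text, including inside a string value.
const chunks = ['[{"text": "Hel', 'lo"}', ', {"text": " world"}'];

function ensureProperEnding(str: string) {
  if (str.startsWith("[") && !str.endsWith("]")) {
    return str + "]";
  }
  return str;
}

let buffer = "";
for (const chunk of chunks) {
  buffer += chunk;
  try {
    // Patch the partial array so it parses, then read the newest element.
    const data = JSON.parse(ensureProperEnding(buffer));
    console.log("latest:", data[data.length - 1].text);
  } catch {
    // Buffer ends mid-value; wait for the next chunk.
  }
}
```

As in the PR's code, a chunk that ends mid-value simply fails to parse and is retried once more data arrives; the catch block deliberately swallows those intermediate errors.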
13 changes: 12 additions & 1 deletion app/layout.tsx
@@ -4,6 +4,10 @@ import "./styles/markdown.scss";
import "./styles/highlight.scss";
import { getClientConfig } from "./config/client";
import { type Metadata } from "next";
+ import { SpeedInsights } from "@vercel/speed-insights/next";
+ import { getServerSideConfig } from "./config/server";
+
+ const serverConfig = getServerSideConfig();

export const metadata: Metadata = {
title: "NextChat",
@@ -35,7 +39,14 @@ export default function RootLayout({
<link rel="manifest" href="/site.webmanifest"></link>
<script src="/serviceWorkerRegister.js" defer></script>
</head>
- <body>{children}</body>
+ <body>
+   {children}
+   {serverConfig?.isVercel && (
+     <>
+       <SpeedInsights />
+     </>
+   )}
+ </body>
</html>
);
}
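For context, `getServerSideConfig` comes from `./config/server`; a plausible shape for the `isVercel` flag (an assumption about that file, not code shown in this PR) is that it keys off the `VERCEL` environment variable the platform sets at build and run time, which is what lets Vercel-only components like `<SpeedInsights />` and `<Analytics />` be skipped elsewhere.

```ts
// Assumed sketch of app/config/server.ts: gate Vercel-only components
// on the VERCEL env var that Vercel injects into its environment.
export const getServerSideConfig = () => {
  return {
    isVercel: !!process.env.VERCEL,
  };
};
```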
6 changes: 5 additions & 1 deletion app/page.tsx
@@ -10,7 +10,11 @@ export default async function App() {
return (
<>
<Home />
- {serverConfig?.isVercel && <Analytics />}
+ {serverConfig?.isVercel && (
+   <>
+     <Analytics />
+   </>
+ )}
</>
);
}
4 changes: 3 additions & 1 deletion app/store/chat.ts
@@ -386,7 +386,9 @@
const contextPrompts = session.mask.context.slice();

// system prompts, to get close to OpenAI Web ChatGPT
- const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts;
+ const shouldInjectSystemPrompts =
+   modelConfig.enableInjectSystemPrompts &&
+   session.mask.modelConfig.model.startsWith("gpt-");

var systemPrompts: ChatMessage[] = [];
systemPrompts = shouldInjectSystemPrompts
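A small standalone illustration of the new guard (hypothetical helper, not code from the PR): system-prompt injection now requires both the user setting and an OpenAI-style model name, so a `gemini-pro` session skips the ChatGPT-style system prompts even when the setting is enabled.

```ts
// Hypothetical helper mirroring the guard above.
const shouldInject = (enabled: boolean, model: string) =>
  enabled && model.startsWith("gpt-");

console.log(shouldInject(true, "gpt-4")); // true: prompts injected
console.log(shouldInject(true, "gemini-pro")); // false: skipped for Gemini
console.log(shouldInject(false, "gpt-4")); // false: user disabled it
```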
3 changes: 2 additions & 1 deletion package.json
@@ -20,6 +20,7 @@
"@hello-pangea/dnd": "^16.5.0",
"@svgr/webpack": "^6.5.1",
"@vercel/analytics": "^0.1.11",
"@vercel/speed-insights": "^1.0.2",
"emoji-picker-react": "^4.5.15",
"fuse.js": "^7.0.0",
"html-to-image": "^1.11.11",
@@ -42,7 +43,7 @@
"zustand": "^4.3.8"
},
"devDependencies": {
"@tauri-apps/cli": "^1.5.8",
"@tauri-apps/cli": "1.5.7",
"@types/node": "^20.9.0",
"@types/react": "^18.2.14",
"@types/react-dom": "^18.2.7",
2 changes: 1 addition & 1 deletion src-tauri/tauri.conf.json
@@ -9,7 +9,7 @@
},
"package": {
"productName": "NextChat",
"version": "2.9.13"
"version": "2.10.1"
},
"tauri": {
"allowlist": {