
Commit 2bd799f

Merge pull request #5331 from ConnectAI-E/feature/plugin
Feature plugin (GPTs-like actions based on function calling)
2 parents 8570457 + 9275f2d commit 2bd799f

31 files changed, +1569 -407 lines
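As background for this merge, a plugin "action" in the function-calling sense is typically described to the model as a JSON-Schema function definition. The sketch below is purely illustrative; the type name, the `web_search` tool, and its schema are assumptions for this sketch, not code from this commit.

```ts
// Illustrative only: a function-calling style "action" a plugin might expose.
// Names and shapes here are assumptions, not definitions from this commit.
type PluginAction = {
  type: "function";
  function: {
    name: string; // tool name the model can call
    description: string; // tells the model when the tool is useful
    parameters: Record<string, unknown>; // JSON Schema for the arguments
  };
};

const webSearchAction: PluginAction = {
  type: "function",
  function: {
    name: "web_search",
    description: "Search the web and return the top results as plain text.",
    parameters: {
      type: "object",
      properties: { query: { type: "string" } },
      required: ["query"],
    },
  },
};
```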

README.md

+6 -6

@@ -91,13 +91,13 @@ For enterprise inquiries, please contact: **[email protected]**
 - [x] Desktop App with tauri
 - [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc.
 - [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
-- [x] Plugins: support artifacts, network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165)
-  - [x] artifacts
-  - [ ] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165)
+- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
+  - [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
 - [ ] local knowledge base

 ## What's New

+- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
 - 🚀 v2.14.0 Now supports Artifacts & SD
 - 🚀 v2.10.1 support Google Gemini Pro model.
 - 🚀 v2.9.11 you can use azure endpoint now.

@@ -128,13 +128,13 @@ For enterprise inquiries, please contact: **[email protected]**
 - [x] 使用 tauri 打包桌面应用
 - [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm)
 - [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
-- [x] 插件机制,支持 artifacts,联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165)
-  - [x] artifacts
-  - [ ] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165)
+- [x] 插件机制,支持`联网搜索``计算器`、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
+  - [x] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
 - [ ] 本地知识库

 ## 最新动态

+- 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
 - 🚀 v2.14.0 现在支持 Artifacts & SD 了。
 - 🚀 v2.10.1 现在支持 Gemini Pro 模型。
 - 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。

app/api/[provider]/[...path]/route.ts

+5 -1

@@ -10,6 +10,8 @@ import { handle as alibabaHandler } from "../../alibaba";
 import { handle as moonshotHandler } from "../../moonshot";
 import { handle as stabilityHandler } from "../../stability";
 import { handle as iflytekHandler } from "../../iflytek";
+import { handle as proxyHandler } from "../../proxy";
+
 async function handle(
   req: NextRequest,
   { params }: { params: { provider: string; path: string[] } },
@@ -36,8 +38,10 @@ async function handle(
       return stabilityHandler(req, { params });
     case ApiPath.Iflytek:
       return iflytekHandler(req, { params });
-    default:
+    case ApiPath.OpenAI:
       return openaiHandler(req, { params });
+    default:
+      return proxyHandler(req, { params });
   }
 }

app/api/common.ts

+1 -4

@@ -32,10 +32,7 @@ export async function requestOpenai(req: NextRequest) {
     authHeaderName = "Authorization";
   }

-  let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll(
-    "/api/openai/",
-    "",
-  );
+  let path = `${req.nextUrl.pathname}`.replaceAll("/api/openai/", "");

   let baseUrl =
     (isAzure ? serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL;
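For a concrete sense of what this change does, here is a small illustration with a sample URL (the URL is an example, not taken from the codebase): the query string is no longer folded into the upstream path.

```ts
// Example URL for illustration only.
const url = new URL("https://example.com/api/openai/v1/chat/completions?foo=bar");

// Before this commit: the query string was kept in the path.
const oldPath = `${url.pathname}${url.search}`.replaceAll("/api/openai/", "");
// -> "v1/chat/completions?foo=bar"

// After this commit: only the pathname is used.
const newPath = `${url.pathname}`.replaceAll("/api/openai/", "");
// -> "v1/chat/completions"
```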

app/api/proxy.ts

+75 -0

@@ -0,0 +1,75 @@
+import { NextRequest, NextResponse } from "next/server";
+
+export async function handle(
+  req: NextRequest,
+  { params }: { params: { path: string[] } },
+) {
+  console.log("[Proxy Route] params ", params);
+
+  if (req.method === "OPTIONS") {
+    return NextResponse.json({ body: "OK" }, { status: 200 });
+  }
+
+  // remove path params from searchParams
+  req.nextUrl.searchParams.delete("path");
+  req.nextUrl.searchParams.delete("provider");
+
+  const subpath = params.path.join("/");
+  const fetchUrl = `${req.headers.get(
+    "x-base-url",
+  )}/${subpath}?${req.nextUrl.searchParams.toString()}`;
+  const skipHeaders = ["connection", "host", "origin", "referer", "cookie"];
+  const headers = new Headers(
+    Array.from(req.headers.entries()).filter((item) => {
+      if (
+        item[0].indexOf("x-") > -1 ||
+        item[0].indexOf("sec-") > -1 ||
+        skipHeaders.includes(item[0])
+      ) {
+        return false;
+      }
+      return true;
+    }),
+  );
+  const controller = new AbortController();
+  const fetchOptions: RequestInit = {
+    headers,
+    method: req.method,
+    body: req.body,
+    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
+    redirect: "manual",
+    // @ts-ignore
+    duplex: "half",
+    signal: controller.signal,
+  };
+
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );
+
+  try {
+    const res = await fetch(fetchUrl, fetchOptions);
+    // to prevent browser prompt for credentials
+    const newHeaders = new Headers(res.headers);
+    newHeaders.delete("www-authenticate");
+    // to disable nginx buffering
+    newHeaders.set("X-Accel-Buffering", "no");
+
+    // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
+    // So if the streaming is disabled, we need to remove the content-encoding header
+    // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header
+    // The browser will try to decode the response with brotli and fail
+    newHeaders.delete("content-encoding");
+
+    return new Response(res.body, {
+      status: res.status,
+      statusText: res.statusText,
+      headers: newHeaders,
+    });
+  } finally {
+    clearTimeout(timeoutId);
+  }
+}
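Taken together with the route.ts change above, any provider segment that does not match a known handler falls through to this proxy, which forwards the request to `${x-base-url}/${subpath}?${searchParams}`. The call below is a hypothetical illustration; the `proxy` route segment, the subpath, and the upstream origin are assumptions, not values from this commit.

```ts
// Hypothetical client-side call through the new catch-all proxy route.
async function callPluginEndpoint() {
  const res = await fetch("/api/proxy/v1/tools/search?q=weather", {
    method: "GET",
    headers: {
      // the handler reads this header to decide where to forward the request
      "x-base-url": "https://plugin.example.com", // assumed upstream origin
    },
  });
  // the upstream status and body are passed back through to the caller
  return res.json();
}
```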

app/client/api.ts

+9 -1

@@ -5,7 +5,13 @@ import {
   ModelProvider,
   ServiceProvider,
 } from "../constant";
-import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
+import {
+  ChatMessageTool,
+  ChatMessage,
+  ModelType,
+  useAccessStore,
+  useChatStore,
+} from "../store";
 import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai";
 import { GeminiProApi } from "./platforms/google";
 import { ClaudeApi } from "./platforms/anthropic";
@@ -56,6 +62,8 @@ export interface ChatOptions {
   onFinish: (message: string) => void;
   onError?: (err: Error) => void;
   onController?: (controller: AbortController) => void;
+  onBeforeTool?: (tool: ChatMessageTool) => void;
+  onAfterTool?: (tool: ChatMessageTool) => void;
 }

 export interface LLMUsage {
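The two new optional callbacks let callers observe tool (function-call) execution during a chat request. The following is a minimal sketch of wiring them up with logging only; it assumes the imports resolve as in the diff above and omits the rest of ChatOptions (messages, config, onFinish, and so on).

```ts
import type { ChatOptions } from "./api";
import type { ChatMessageTool } from "../store";

// Minimal sketch: only the two new hooks are shown.
const toolHooks: Pick<ChatOptions, "onBeforeTool" | "onAfterTool"> = {
  onBeforeTool(tool: ChatMessageTool) {
    // invoked before a model-requested tool runs
    console.log("[Plugin] running tool", tool);
  },
  onAfterTool(tool: ChatMessageTool) {
    // invoked after the tool finishes, e.g. to refresh the UI with its result
    console.log("[Plugin] tool finished", tool);
  },
};
```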
