Commit ad3a154

♻️ refactor: refactor and clean some code (#4629)
* ♻️ refactor: refactor some code
* Update .gitignore
1 parent bb25f31 commit ad3a154

7 files changed (+22 -34 lines)


src/app/(backend)/webapi/chat/anthropic/route.ts (-12)

@@ -1,17 +1,5 @@
 import { POST as UniverseRoute } from '../[provider]/route';

-// due to the Chinese region does not support accessing Google
-// we need to use proxy to access it
-// refs: https://github.com/google/generative-ai-js/issues/29#issuecomment-1866246513
-// if (process.env.HTTP_PROXY_URL) {
-// const { setGlobalDispatcher, ProxyAgent } = require('undici');
-//
-// console.log(process.env.HTTP_PROXY_URL)
-// setGlobalDispatcher(new ProxyAgent({ uri: process.env.HTTP_PROXY_URL }));
-// }
-
-// but undici only can be used in NodeJS
-// so if you want to use with proxy, you need comment the code below
 export const runtime = 'edge';

 export const preferredRegion = [

src/app/(backend)/webapi/chat/google/route.ts (-11)

@@ -1,16 +1,5 @@
 import { POST as UniverseRoute } from '../[provider]/route';

-// due to the Chinese region does not support accessing Google
-// we need to use proxy to access it
-// refs: https://github.com/google/generative-ai-js/issues/29#issuecomment-1866246513
-// if (process.env.HTTP_PROXY_URL) {
-// const { setGlobalDispatcher, ProxyAgent } = require('undici');
-//
-// setGlobalDispatcher(new ProxyAgent({ uri: process.env.HTTP_PROXY_URL }));
-// }
-
-// but undici only can be used in NodeJS
-// so if you want to use with proxy, you need comment the code below
 export const runtime = 'edge';

 // due to Gemini-1.5-pro is not available in Hong Kong, we need to set the preferred region to exclude "Hong Kong (hkg1)".

src/app/(backend)/webapi/chat/wenxin/route.ts (+2 -2)

@@ -1,5 +1,5 @@
 import { getLLMConfig } from '@/config/llm';
-import { AgentRuntime } from '@/libs/agent-runtime';
+import { AgentRuntime, ModelProvider } from '@/libs/agent-runtime';
 import LobeWenxinAI from '@/libs/agent-runtime/wenxin';

 import { POST as UniverseRoute } from '../[provider]/route';
@@ -26,5 +26,5 @@ export const POST = async (req: Request) =>

     return new AgentRuntime(instance);
   },
-  params: { provider: 'wenxin' },
+  params: { provider: ModelProvider.Wenxin },
 });
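The second hunk swaps the bare 'wenxin' string for the ModelProvider enum member that the first hunk imports from '@/libs/agent-runtime', so a mistyped provider id now fails type-checking instead of surfacing only at runtime. A minimal sketch of the difference (the misspelled identifiers are made up for illustration, and the enum member presumably still resolves to the same 'wenxin' string the old literal carried):

```ts
import { ModelProvider } from '@/libs/agent-runtime';

// Enum member used by the route above.
const provider = ModelProvider.Wenxin;

// const typo = ModelProvider.Wenxn; // does not compile; caught by tsc
// const typo = 'wenxn';             // compiles, but fails only when the request runs
```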

src/app/(main)/settings/llm/components/ProviderConfig/index.tsx (+9 -6)

@@ -116,6 +116,7 @@ const ProviderConfig = memo<ProviderConfigProps>(
     className,
     name,
     showAceGcm = true,
+    showChecker = true,
     extra,
   }) => {
     const { t } = useTranslation('setting');
@@ -219,12 +220,14 @@ const ProviderConfig = memo<ProviderConfigProps>(
       label: t('llm.modelList.title'),
       name: [LLMProviderConfigKey, id, LLMProviderModelListKey],
     },
-    checkerItem ?? {
-      children: <Checker model={checkModel!} provider={id} />,
-      desc: t('llm.checker.desc'),
-      label: t('llm.checker.title'),
-      minWidth: undefined,
-    },
+    showChecker
+      ? (checkerItem ?? {
+          children: <Checker model={checkModel!} provider={id} />,
+          desc: t('llm.checker.desc'),
+          label: t('llm.checker.title'),
+          minWidth: undefined,
+        })
+      : undefined,
     showAceGcm && isServerMode && aceGcmItem,
   ].filter(Boolean) as FormItemProps[];

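Because showChecker defaults to true, existing call sites keep the connectivity checker row; when it is false the entry becomes undefined and is dropped by the trailing .filter(Boolean), the same mechanism already used for showAceGcm. A stripped-down sketch of that pattern (the item shapes are simplified stand-ins, not the real FormItemProps):

```ts
// Simplified model of the form-item list built in the diff above.
type Item = { label: string };

const buildItems = (showChecker: boolean): Item[] =>
  [
    { label: 'modelList' },
    showChecker ? { label: 'checker' } : undefined,
  ].filter(Boolean) as Item[];

console.log(buildItems(true).length);  // 2: checker row present
console.log(buildItems(false).length); // 1: checker row filtered out
```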

src/server/globalConfig/index.ts (+1 -1)

@@ -103,7 +103,7 @@ export const getServerGlobalConfig = () => {

     ENABLED_AI21,
     AI21_MODEL_LIST,
-
+
     ENABLED_AI360,
     AI360_MODEL_LIST,

src/server/modules/AgentRuntime/index.ts (+6 -2)

@@ -266,8 +266,12 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
     case ModelProvider.SenseNova: {
       const { SENSENOVA_ACCESS_KEY_ID, SENSENOVA_ACCESS_KEY_SECRET } = getLLMConfig();

-      const sensenovaAccessKeyID = apiKeyManager.pick(payload?.sensenovaAccessKeyID || SENSENOVA_ACCESS_KEY_ID);
-      const sensenovaAccessKeySecret = apiKeyManager.pick(payload?.sensenovaAccessKeySecret || SENSENOVA_ACCESS_KEY_SECRET);
+      const sensenovaAccessKeyID = apiKeyManager.pick(
+        payload?.sensenovaAccessKeyID || SENSENOVA_ACCESS_KEY_ID,
+      );
+      const sensenovaAccessKeySecret = apiKeyManager.pick(
+        payload?.sensenovaAccessKeySecret || SENSENOVA_ACCESS_KEY_SECRET,
+      );

       const apiKey = sensenovaAccessKeyID + ':' + sensenovaAccessKeySecret;
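Only the formatting of the two apiKeyManager.pick calls changes here; the SenseNova credentials are still folded into one apiKey string joined with ':'. A rough sketch of that composition with made-up values (apiKeyManager and the real environment variables are not reproduced):

```ts
// Illustrative values only.
const sensenovaAccessKeyID = 'my-access-key-id';
const sensenovaAccessKeySecret = 'my-access-key-secret';

const apiKey = sensenovaAccessKeyID + ':' + sensenovaAccessKeySecret;
// apiKey === 'my-access-key-id:my-access-key-secret'
```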

src/types/llm.ts (+4)

@@ -122,6 +122,10 @@ export interface ModelProviderCard {
    * so provider like ollama don't need api key field
    */
   showApiKey?: boolean;
+  /**
+   * whether show checker in the provider config
+   */
+  showChecker?: boolean;
   /**
    * whether to smoothing the output
    */
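Provider cards can now declare the new flag alongside showApiKey. A minimal sketch of a card that turns the checker off, typed as a Partial because only the fields relevant here are shown (the 'example' id is a placeholder, not a real provider):

```ts
import type { ModelProviderCard } from '@/types/llm';

// Only the flags touched by this commit are shown; everything else is omitted.
const exampleCard: Partial<ModelProviderCard> = {
  id: 'example',      // placeholder provider id
  showApiKey: false,  // existing flag: hide the API key field
  showChecker: false, // new flag: hide the checker in the provider config UI
};
```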

0 commit comments
