Skip to content

Commit a84d807

Browse files
danielgl and arvinxx authored
✨ feat: add gpt-4o-mini in OpenAI Provider and set it as the default model (#3256)
* ✨ feat: gpt-4o-mini to openai provider * ♻️ refactor: move gpt-4o-mini as default model * ✅ test: fix test * 🐛 fix: set gpt-4o-mini as default model * chore: improve code * chore: improve code --------- Co-authored-by: arvinxx <[email protected]>
1 parent 52d2b9d commit a84d807

File tree

12 files changed

+70
-66
lines changed

12 files changed

+70
-66
lines changed

.i18nrc.js

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ module.exports = defineConfig({
2323
'vi-VN',
2424
],
2525
temperature: 0,
26-
modelName: 'gpt-3.5-turbo-0125',
26+
modelName: 'gpt-4o-mini',
2727
splitToken: 2048,
2828
experimental: {
2929
jsonMode: true,

.seorc.cjs

+1-1
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ const { defineConfig } = require('@lobehub/seo-cli');
22

33
module.exports = defineConfig({
44
entry: ['./docs/**/*.mdx'],
5-
modelName: 'gpt-3.5-turbo-0125',
5+
modelName: 'gpt-4o-mini',
66
experimental: {
77
jsonMode: true,
88
},

src/config/modelProviders/openai.ts

+59-50
Original file line numberDiff line numberDiff line change
@@ -4,48 +4,40 @@ import { ModelProviderCard } from '@/types/llm';
44
const OpenAI: ModelProviderCard = {
55
chatModels: [
66
{
7-
description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
8-
displayName: 'GPT-3.5 Turbo',
7+
description: 'Currently points to gpt-4o-mini-2024-07-18',
8+
displayName: 'GPT-4o mini',
99
enabled: true,
1010
functionCall: true,
11-
id: 'gpt-3.5-turbo',
12-
tokens: 16_385,
11+
id: 'gpt-4o-mini',
12+
maxOutput: 16_385,
13+
tokens: 128_000,
14+
vision: true,
1315
},
1416
{
15-
displayName: 'GPT-3.5 Turbo (0125)',
17+
description: 'Currently points to gpt-4o-2024-05-13',
18+
displayName: 'GPT-4o',
19+
enabled: true,
1620
functionCall: true,
17-
id: 'gpt-3.5-turbo-0125',
18-
tokens: 16_385,
21+
id: 'gpt-4o',
22+
tokens: 128_000,
23+
vision: true,
1924
},
2025
{
21-
displayName: 'GPT-3.5 Turbo (1106)',
26+
description: 'GPT-4 Turbo with Vision',
27+
displayName: 'GPT-4 Turbo',
28+
enabled: true,
2229
functionCall: true,
23-
id: 'gpt-3.5-turbo-1106',
24-
tokens: 16_385,
25-
},
26-
{
27-
displayName: 'GPT-3.5 Turbo Instruct',
28-
id: 'gpt-3.5-turbo-instruct',
29-
tokens: 4096,
30-
},
31-
{
32-
description: 'Currently points to gpt-3.5-turbo-16k-0613',
33-
displayName: 'GPT-3.5 Turbo 16K',
34-
id: 'gpt-3.5-turbo-16k',
35-
legacy: true,
36-
tokens: 16_385,
37-
},
38-
{
39-
displayName: 'GPT-3.5 Turbo (0613)',
40-
id: 'gpt-3.5-turbo-0613',
41-
legacy: true,
42-
tokens: 4096,
30+
id: 'gpt-4-turbo',
31+
tokens: 128_000,
32+
vision: true,
4333
},
4434
{
45-
displayName: 'GPT-3.5 Turbo 16K (0613)',
46-
id: 'gpt-3.5-turbo-16k-0613',
47-
legacy: true,
48-
tokens: 16_385,
35+
description: 'GPT-4 Turbo 视觉版 (240409)',
36+
displayName: 'GPT-4 Turbo Vision (240409)',
37+
functionCall: true,
38+
id: 'gpt-4-turbo-2024-04-09',
39+
tokens: 128_000,
40+
vision: true,
4941
},
5042
{
5143
description: 'Currently points to gpt-4-0125-preview',
@@ -106,33 +98,50 @@ const OpenAI: ModelProviderCard = {
10698
tokens: 32_768,
10799
},
108100
{
109-
description: 'GPT-4 Turbo with Vision',
110-
displayName: 'GPT-4 Turbo',
111-
enabled: true,
101+
description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
102+
displayName: 'GPT-3.5 Turbo',
112103
functionCall: true,
113-
id: 'gpt-4-turbo',
114-
tokens: 128_000,
115-
vision: true,
104+
id: 'gpt-3.5-turbo',
105+
tokens: 16_385,
116106
},
117107
{
118-
description: 'GPT-4 Turbo 视觉版 (240409)',
119-
displayName: 'GPT-4 Turbo Vision (240409)',
108+
displayName: 'GPT-3.5 Turbo (0125)',
120109
functionCall: true,
121-
id: 'gpt-4-turbo-2024-04-09',
122-
tokens: 128_000,
123-
vision: true,
110+
id: 'gpt-3.5-turbo-0125',
111+
tokens: 16_385,
124112
},
125113
{
126-
description: 'Currently points to gpt-4o-2024-05-13',
127-
displayName: 'GPT-4o',
128-
enabled: true,
114+
displayName: 'GPT-3.5 Turbo (1106)',
129115
functionCall: true,
130-
id: 'gpt-4o',
131-
tokens: 128_000,
132-
vision: true,
116+
id: 'gpt-3.5-turbo-1106',
117+
tokens: 16_385,
118+
},
119+
{
120+
displayName: 'GPT-3.5 Turbo Instruct',
121+
id: 'gpt-3.5-turbo-instruct',
122+
tokens: 4096,
123+
},
124+
{
125+
description: 'Currently points to gpt-3.5-turbo-16k-0613',
126+
displayName: 'GPT-3.5 Turbo 16K',
127+
id: 'gpt-3.5-turbo-16k',
128+
legacy: true,
129+
tokens: 16_385,
130+
},
131+
{
132+
displayName: 'GPT-3.5 Turbo (0613)',
133+
id: 'gpt-3.5-turbo-0613',
134+
legacy: true,
135+
tokens: 4096,
136+
},
137+
{
138+
displayName: 'GPT-3.5 Turbo 16K (0613)',
139+
id: 'gpt-3.5-turbo-16k-0613',
140+
legacy: true,
141+
tokens: 16_385,
133142
},
134143
],
135-
checkModel: 'gpt-3.5-turbo',
144+
checkModel: 'gpt-4o-mini',
136145
enabled: true,
137146
id: 'openai',
138147
modelList: { showModelFetcher: true },

src/const/settings/agent.ts

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import { DEFAULT_AGENT_META } from '@/const/meta';
2+
import { DEFAULT_MODEL } from '@/const/settings/llm';
23
import { ModelProvider } from '@/libs/agent-runtime';
34
import { LobeAgentChatConfig, LobeAgentConfig, LobeAgentTTSConfig } from '@/types/agent';
45
import { UserDefaultAgent } from '@/types/user/settings';
@@ -21,7 +22,7 @@ export const DEFAULT_AGENT_CHAT_CONFIG: LobeAgentChatConfig = {
2122

2223
export const DEFAULT_AGENT_CONFIG: LobeAgentConfig = {
2324
chatConfig: DEFAULT_AGENT_CHAT_CONFIG,
24-
model: 'gpt-3.5-turbo',
25+
model: DEFAULT_MODEL,
2526
params: {
2627
frequency_penalty: 0,
2728
presence_penalty: 0,

src/const/settings/llm.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,6 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
111111
},
112112
};
113113

114-
export const DEFAULT_MODEL = 'gpt-3.5-turbo';
114+
export const DEFAULT_MODEL = 'gpt-4o-mini';
115115

116116
export const DEFAULT_PROVIDER = ModelProvider.OpenAI;

src/database/client/schemas/session.ts

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import { z } from 'zod';
22

3+
import { DEFAULT_MODEL } from '@/const/settings';
34
import { AgentChatConfigSchema } from '@/types/agent';
45
import { LobeMetaDataSchema } from '@/types/meta';
56

@@ -26,7 +27,7 @@ const ttsSchema = z.object({
2627
export const AgentSchema = z.object({
2728
chatConfig: AgentChatConfigSchema,
2829
fewShots: fewShotsSchema.optional(),
29-
model: z.string().default('gpt-3.5-turbo'),
30+
model: z.string().default(DEFAULT_MODEL),
3031
params: z.object({
3132
frequency_penalty: z.number().default(0).optional(),
3233
max_tokens: z.number().optional(),

src/libs/agent-runtime/openai/__snapshots__/index.test.ts.snap

-1
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ exports[`LobeOpenAI > models > should get models 1`] = `
55
{
66
"description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务",
77
"displayName": "GPT-3.5 Turbo",
8-
"enabled": true,
98
"functionCall": true,
109
"id": "gpt-3.5-turbo",
1110
"tokens": 16385,

src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap

+1-1
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ This project is led by [qnguyen3](https://twitter.com/stablequan) and [teknium](
3333
3434
Updated by OpenAI to point to the [latest version of GPT-3.5](/models?q=openai/gpt-3.5). Training data up to Sep 2021.",
3535
"displayName": "OpenAI: GPT-3.5 Turbo",
36-
"enabled": true,
36+
"enabled": false,
3737
"functionCall": false,
3838
"id": "openai/gpt-3.5-turbo",
3939
"maxTokens": 4096,

src/locales/default/setting.ts

-6
Original file line numberDiff line numberDiff line change
@@ -213,12 +213,6 @@ export default {
213213
},
214214
model: {
215215
desc: '{{provider}} 模型',
216-
list: {
217-
'gpt-3.5-turbo': 'GPT 3.5',
218-
'gpt-3.5-turbo-16k': 'GPT 3.5 (16K)',
219-
'gpt-4': 'GPT 4',
220-
'gpt-4-32k': 'GPT 4 (32K)',
221-
},
222216
title: '模型',
223217
},
224218
presencePenalty: {

src/store/user/slices/modelList/selectors/modelProvider.test.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ describe('modelProviderSelectors', () => {
4949
const s = merge(initialState, {}) as unknown as UserStore;
5050

5151
const result = modelProviderSelectors.getDefaultEnabledModelsById('openai')(s);
52-
expect(result).toEqual(['gpt-3.5-turbo', 'gpt-4-turbo', 'gpt-4o']);
52+
expect(result).toEqual(['gpt-4o-mini', 'gpt-4o', 'gpt-4-turbo']);
5353
});
5454

5555
it('should return undefined for a non-existing provider', () => {

src/store/user/slices/modelList/selectors/modelProvider.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ export const getDefaultModeProviderById = (provider: string) => (s: UserStore) =
4949
/**
5050
* get the default enabled models for a provider
5151
* it's a default enabled model list by Lobe Chat
52-
* e.g. openai is ['gpt-3.5-turbo','gpt-4-turbo']
52+
* e.g. openai is ['gpt-4o-mini','gpt-4o','gpt-4-turbo']
5353
*/
5454
const getDefaultEnabledModelsById = (provider: string) => (s: UserStore) => {
5555
const modelProvider = getDefaultModeProviderById(provider)(s);

src/types/agent/index.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ export interface LobeAgentConfig {
2020
fewShots?: FewShots;
2121
/**
2222
* 角色所使用的语言模型
23-
* @default gpt-3.5-turbo
23+
* @default gpt-4o-mini
2424
*/
2525
model: string;
2626
/**

0 commit comments

Comments (0)