
Commit c3ba74c

💄 style: update model description
1 parent bed01e3 commit c3ba74c

5 files changed (+33 -29 lines)

Dockerfile (+1 -1)

@@ -158,7 +158,7 @@ ENV \
     # Cloudflare
     CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
     # Cohere
-    COHERE_API_KEY="" COHERE_MODEL_LIST="" \
+    COHERE_API_KEY="" COHERE_MODEL_LIST="" COHERE_PROXY_URL="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
     # Fireworks AI

Dockerfile.database (+1 -1)

@@ -201,7 +201,7 @@ ENV \
     # Cloudflare
     CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
     # Cohere
-    COHERE_API_KEY="" COHERE_MODEL_LIST="" \
+    COHERE_API_KEY="" COHERE_MODEL_LIST="" COHERE_PROXY_URL="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
     # Fireworks AI

Dockerfile.pglite (+1 -1)

@@ -159,7 +159,7 @@ ENV \
     # Cloudflare
     CLOUDFLARE_API_KEY="" CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID="" CLOUDFLARE_MODEL_LIST="" \
     # Cohere
-    COHERE_API_KEY="" COHERE_MODEL_LIST="" \
+    COHERE_API_KEY="" COHERE_MODEL_LIST="" COHERE_PROXY_URL="" \
     # DeepSeek
     DEEPSEEK_API_KEY="" DEEPSEEK_MODEL_LIST="" \
     # Fireworks AI
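
The only change across the three Dockerfiles is the new COHERE_PROXY_URL placeholder. As a rough sketch of how such an override is commonly consumed (the resolution logic below is illustrative and not taken from this repository), an OpenAI-compatible client can fall back to the Cohere compatibility endpoint that appears as the proxyUrl placeholder later in this commit:

import OpenAI from 'openai';

// Illustrative only: prefer the new COHERE_PROXY_URL override, otherwise fall back
// to the compatibility endpoint used as the proxyUrl placeholder in
// src/config/modelProviders/cohere.ts.
const baseURL = process.env.COHERE_PROXY_URL || 'https://api.cohere.ai/compatibility/v1';

const cohere = new OpenAI({
  apiKey: process.env.COHERE_API_KEY,
  baseURL,
});

// Usage example (model id taken from src/config/aiModels/cohere.ts):
// await cohere.chat.completions.create({
//   messages: [{ content: 'Hello', role: 'user' }],
//   model: 'command-a-03-2025',
// });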

src/config/aiModels/cohere.ts (+30 -24)

@@ -6,7 +6,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 256_000,
-    description: 'Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases. Command A has a context length of 256K, only requires two GPUs to run, and has 150% higher throughput compared to Command R+ 08-2024.',
+    description:
+      'Command A 是我们迄今为止性能最强的模型,在工具使用、代理、检索增强生成(RAG)和多语言应用场景方面表现出色。Command A 具有 256K 的上下文长度,仅需两块 GPU 即可运行,并且相比于 Command R+ 08-2024,吞吐量提高了 150%。',
     displayName: 'Command A',
     enabled: true,
     id: 'command-a-03-2025',
@@ -22,7 +23,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 128_000,
-    description: 'command-r-plus is an alias for command-r-plus-04-2024, so if you use command-r-plus in the API, that’s the model you’re pointing to.',
+    description:
+      'command-r-plus 是 command-r-plus-04-2024 的别名,因此如果您在 API 中使用 command-r-plus,实际上指向的就是该模型。',
     displayName: 'Command R+',
     enabled: true,
     id: 'command-r-plus',
@@ -38,7 +40,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 128_000,
-    description: 'Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It is best suited for complex RAG workflows and multi-step tool use.',
+    description:
+      'Command R+ 是一个遵循指令的对话模型,在语言任务方面表现出更高的质量、更可靠,并且相比以往模型具有更长的上下文长度。它最适用于复杂的 RAG 工作流和多步工具使用。',
     displayName: 'Command R+ 04-2024',
     id: 'command-r-plus-04-2024',
     maxOutput: 4000,
@@ -53,7 +56,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 128_000,
-    description: 'command-r is an alias for command-r-03-2024, so if you use command-r in the API, that’s the model you’re pointing to.',
+    description:
+      'command-r 是 command-r-03-2024 的别名,因此如果您在 API 中使用 command-r,实际上指向的就是该模型。',
     displayName: 'Command R',
     enabled: true,
     id: 'command-r',
@@ -69,7 +73,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 128_000,
-    description: 'command-r-08-2024 is an update of the Command R model, delivered in August 2024.',
+    description:
+      'command-r-08-2024 是 Command R 模型的更新版本,于 2024 年 8 月发布。',
     displayName: 'Command R 08-2024',
     id: 'command-r-08-2024',
     maxOutput: 4000,
@@ -84,7 +89,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 128_000,
-    description: 'Command R is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It can be used for complex workflows like code generation, retrieval augmented generation (RAG), tool use, and agents.',
+    description:
+      'Command R 是一个遵循指令的对话模型,在语言任务方面表现出更高的质量、更可靠,并且相比以往模型具有更长的上下文长度。它可用于复杂的工作流程,如代码生成、检索增强生成(RAG)、工具使用和代理。',
     displayName: 'Command R 03-2024',
     id: 'command-r-03-2024',
     maxOutput: 4000,
@@ -99,7 +105,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 128_000,
-    description: 'command-r7b-12-2024 is a small, fast update delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.',
+    description:
+      'command-r7b-12-2024 是一个小型且高效的更新版本,于 2024 年 12 月发布。它在 RAG、工具使用、代理等需要复杂推理和多步处理的任务中表现出色。',
     displayName: 'Command R7B 12-2024',
     enabled: true,
     id: 'command-r7b-12-2024',
@@ -111,11 +118,9 @@ const cohereChatModels: AIChatModelCard[] = [
     type: 'chat'
   },
   {
-    abilities: {
-      functionCall: true,
-    },
     contextWindowTokens: 4000,
-    description: 'An instruction-following conversational model that performs language tasks with high quality, more reliably and with a longer context than our base generative models.',
+    description:
+      '一个遵循指令的对话模型,在语言任务中表现出高质量、更可靠,并且相比我们的基础生成模型具有更长的上下文长度。',
     displayName: 'Command',
     enabled: true,
     id: 'command',
@@ -131,7 +136,8 @@ const cohereChatModels: AIChatModelCard[] = [
       functionCall: true,
     },
     contextWindowTokens: 128_000,
-    description: 'To reduce the time between major releases, we put out nightly versions of command models. For command, that is command-nightly. Be advised that command-nightly is the latest, most experimental, and (possibly) unstable version of its default counterpart. Nightly releases are updated regularly, without warning, and are not recommended for production use.',
+    description:
+      '为了缩短主要版本发布之间的时间间隔,我们推出了 Command 模型的每夜版本。对于 Command 系列,这一版本称为 command-nightly。请注意,command-nightly 是最新、最具实验性且(可能)不稳定的版本。每夜版本会定期更新,且不会提前通知,因此不建议在生产环境中使用。',
     displayName: 'Command Nightly',
     id: 'command-nightly',
     maxOutput: 4000,
@@ -142,11 +148,9 @@ const cohereChatModels: AIChatModelCard[] = [
     type: 'chat'
   },
   {
-    abilities: {
-      functionCall: true,
-    },
     contextWindowTokens: 4000,
-    description: 'A smaller, faster version of command. Almost as capable, but a lot faster.',
+    description:
+      '一个更小、更快的 Command 版本,几乎同样强大,但速度更快。',
     displayName: 'Command Light',
     enabled: true,
     id: 'command-light',
@@ -158,11 +162,9 @@ const cohereChatModels: AIChatModelCard[] = [
     type: 'chat'
   },
   {
-    abilities: {
-      functionCall: true,
-    },
     contextWindowTokens: 4000,
-    description: 'To reduce the time between major releases, we put out nightly versions of command models. For command-light, that is command-light-nightly. Be advised that command-light-nightly is the latest, most experimental, and (possibly) unstable version of its default counterpart. Nightly releases are updated regularly, without warning, and are not recommended for production use.',
+    description:
+      '为了缩短主要版本发布之间的时间间隔,我们推出了 Command 模型的每夜版本。对于 command-light 系列,这一版本称为 command-light-nightly。请注意,command-light-nightly 是最新、最具实验性且(可能)不稳定的版本。每夜版本会定期更新,且不会提前通知,因此不建议在生产环境中使用。',
     displayName: 'Command Light Nightly',
     id: 'command-light-nightly',
     maxOutput: 4000,
@@ -174,7 +176,8 @@ const cohereChatModels: AIChatModelCard[] = [
   },
   {
     contextWindowTokens: 128_000,
-    description: 'Aya Expanse is a highly performant 32B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.',
+    description:
+      'Aya Expanse 是一款高性能的 32B 多语言模型,旨在通过指令调优、数据套利、偏好训练和模型合并的创新,挑战单语言模型的表现。它支持 23 种语言。',
     displayName: 'Aya Expanse 32B',
     enabled: true,
     id: 'c4ai-aya-expanse-32b',
@@ -187,7 +190,8 @@ const cohereChatModels: AIChatModelCard[] = [
   },
   {
     contextWindowTokens: 8000,
-    description: 'Aya Expanse is a highly performant 8B multilingual model, designed to rival monolingual performance through innovations in instruction tuning with data arbitrage, preference training, and model merging. Serves 23 languages.',
+    description:
+      'Aya Expanse 是一款高性能的 8B 多语言模型,旨在通过指令调优、数据套利、偏好训练和模型合并的创新,挑战单语言模型的表现。它支持 23 种语言。',
     displayName: 'Aya Expanse 8B',
     enabled: true,
     id: 'c4ai-aya-expanse-8b',
@@ -203,7 +207,8 @@ const cohereChatModels: AIChatModelCard[] = [
       vision: true,
     },
     contextWindowTokens: 16_000,
-    description: 'Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. Serves 23 languages. This 32 billion parameter variant is focused on state-of-art multilingual performance.',
+    description:
+      'Aya Vision 是一款最先进的多模态模型,在语言、文本和图像能力的多个关键基准上表现出色。它支持 23 种语言。这个 320 亿参数的版本专注于最先进的多语言表现。',
     displayName: 'Aya Vision 32B',
     enabled: true,
     id: 'c4ai-aya-vision-32b',
@@ -219,7 +224,8 @@ const cohereChatModels: AIChatModelCard[] = [
       vision: true,
     },
     contextWindowTokens: 16_000,
-    description: 'Aya Vision is a state-of-the-art multimodal model excelling at a variety of critical benchmarks for language, text, and image capabilities. This 8 billion parameter variant is focused on low latency and best-in-class performance.',
+    description:
+      'Aya Vision 是一款最先进的多模态模型,在语言、文本和图像能力的多个关键基准上表现出色。这个 80 亿参数的版本专注于低延迟和最佳性能。',
     displayName: 'Aya Vision 8B',
     enabled: true,
     id: 'c4ai-aya-vision-8b',
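
Every edit in this file follows the same pattern: the English description strings are replaced with multi-line Chinese strings, and the legacy command, command-light, and command-light-nightly entries lose their abilities.functionCall flag. Below is a minimal sketch of the card shape being edited, with an assumed import path and a purely illustrative entry:

// Import path assumed for illustration; the diff only shows the array contents.
import { AIChatModelCard } from '@/types/aiModel';

// Hypothetical entry following the field layout visible in the hunks above;
// the id, token counts, and description are placeholders, not a real Cohere model.
const exampleModel: AIChatModelCard = {
  abilities: {
    functionCall: true,
  },
  contextWindowTokens: 128_000,
  description:
    'Illustrative entry written in the multi-line description style this commit adopts.',
  displayName: 'Example Command Model',
  enabled: false,
  id: 'command-example',
  maxOutput: 4000,
  type: 'chat',
};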

src/config/modelProviders/cohere.ts (-2)

@@ -4,12 +4,10 @@ const Cohere: ModelProviderCard = {
   chatModels: [],
   checkModel: 'command-r7b-12-2024',
   description: 'Cohere',
-  //disableBrowserRequest: true,
   id: 'cohere',
   modelsUrl: 'https://docs.cohere.com/v2/docs/models',
   name: 'Cohere',
   settings: {
-    //disableBrowserRequest: true,
     proxyUrl: {
       placeholder: 'https://api.cohere.ai/compatibility/v1',
     },
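
With the two commented-out disableBrowserRequest lines dropped, the fields visible in this hunk reduce to the following sketch (the ModelProviderCard type annotation, imports, and any fields outside the hunk are omitted):

// Post-commit state of only the fields shown above; not the complete file.
const Cohere = {
  chatModels: [],
  checkModel: 'command-r7b-12-2024',
  description: 'Cohere',
  id: 'cohere',
  modelsUrl: 'https://docs.cohere.com/v2/docs/models',
  name: 'Cohere',
  settings: {
    proxyUrl: {
      placeholder: 'https://api.cohere.ai/compatibility/v1',
    },
  },
};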
