
Commit e6d3e4a

💄 style: support together ai to fetch model list (#2138)
* 💄 style: support together ai model fetch
* 💄 style: support together ai model fetch
* 💄 style: support together ai model fetch
* 💄 style: support together ai model fetch
1 parent f45f875 commit e6d3e4a

File tree

5 files changed: +117 -5 lines changed


package.json

+1 -1

@@ -92,7 +92,7 @@
     "@lobehub/chat-plugins-gateway": "latest",
     "@lobehub/icons": "latest",
     "@lobehub/tts": "latest",
-    "@lobehub/ui": "^1.138.6",
+    "@lobehub/ui": "^1.138.8",
     "@next/third-parties": "^14.2.2",
     "@sentry/nextjs": "^7.111.0",
     "@vercel/analytics": "^1.2.2",

src/app/settings/llm/TogetherAI/index.tsx

+1

@@ -10,6 +10,7 @@ const TogetherAIProvider = memo(() => {
   return (
     <ProviderConfig
       checkModel={'togethercomputer/alpaca-7b'}
+      modelList={{ showModelFetcher: true }}
       provider={'togetherai'}
       title={
         <Together.Combine

src/libs/agent-runtime/togetherai/index.ts

+32 -1

@@ -1,9 +1,13 @@
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
+
 import { AgentRuntimeErrorType } from '../error';
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+import { TogetherAIModel } from './type';
 
+const baseURL = 'https://api.together.xyz';
 export const LobeTogetherAI = LobeOpenAICompatibleFactory({
-  baseURL: 'https://api.together.xyz/v1',
+  baseURL: `${baseURL}/v1`,
   constructorOptions: {
     defaultHeaders: {
       'HTTP-Referer': 'https://chat-preview.lobehub.com',

@@ -17,5 +21,32 @@ export const LobeTogetherAI = LobeOpenAICompatibleFactory({
     bizError: AgentRuntimeErrorType.TogetherAIBizError,
     invalidAPIKey: AgentRuntimeErrorType.InvalidTogetherAIAPIKey,
   },
+  models: async ({ apiKey }) => {
+    const data = await fetch(`${baseURL}/api/models`, {
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+      },
+    });
+    if (!data.ok) {
+      throw new Error(`Together Fetch Error: ${data.statusText || data.status}`);
+    }
+
+    const models: TogetherAIModel[] = await data.json();
+
+    return models
+      .filter((m) => m.display_type === 'chat')
+      .map((model) => {
+        return {
+          description: model.description,
+          displayName: model.display_name,
+          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.name.endsWith(m.id))?.enabled || false,
+          functionCall: model.description?.includes('function calling'),
+          id: model.name,
+          maxOutput: model.context_length,
+          tokens: model.context_length,
+          vision: model.description?.includes('vision') || model.name?.includes('vision'),
+        };
+      });
+  },
   provider: ModelProvider.TogetherAI,
 });
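
For orientation, a minimal sketch of how the new fetcher might be called from outside the runtime. It assumes, as the async models() method and the this.client usage above suggest, that LobeOpenAICompatibleFactory produces a runtime class constructed with an apiKey; the wrapper function and logging are illustrative and not part of this commit.

import { LobeTogetherAI } from '@/libs/agent-runtime/togetherai';

// List the chat models Together AI currently serves, as seen through the new
// `models` fetcher above (it keeps only entries with display_type === 'chat').
const listTogetherChatModels = async (apiKey: string) => {
  const runtime = new LobeTogetherAI({ apiKey });
  const cards = await runtime.models();

  for (const card of cards) {
    console.log(`${card.id} (${card.tokens ?? '?'} tokens)`);
  }

  return cards;
};
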
src/libs/agent-runtime/togetherai/type.ts

+76

@@ -0,0 +1,76 @@
+
+interface ModelInstanceConfig {
+  appearsIn: any[]; // you can replace this with the actual type
+  order: number;
+}
+
+interface Config {
+  add_generation_prompt: boolean;
+  chat_template: string;
+  prompt_format: string;
+  stop: string[];
+}
+
+interface Pricing {
+  hourly: number;
+  input: number;
+  output: number;
+}
+
+interface Instance {
+  avzone: string;
+  cluster: string;
+}
+
+interface Depth {
+  asks: Record<string, number>;
+  asks_updated: string;
+  gpus: Record<string, number>;
+  num_asks: number;
+  num_bids: number;
+  num_running: number;
+  permit_required: boolean;
+  price: {
+    base: number;
+    finetune: number;
+    hourly: number;
+    input: number;
+    output: number;
+  };
+  qps: number;
+  stats: {
+    avzone: string;
+    capacity: number;
+    cluster: string;
+    error_rate: number;
+    qps: number;
+    retry_rate: number;
+    throughput_in: number;
+    throughput_out: number;
+  }[];
+}
+
+export interface TogetherAIModel {
+  _id: string;
+  access: string;
+  config: Config;
+  context_length: number;
+  created_at: string;
+  creator_organization: string;
+  depth: Depth;
+  description: string;
+  descriptionLink: string;
+  display_name: string;
+  display_type: string;
+  hardware_label: string;
+  instances: Instance[];
+  isFeaturedModel: boolean;
+  license: string;
+  link: string;
+  modelInstanceConfig: ModelInstanceConfig;
+  name: string;
+  num_parameters: number;
+  pricing: Pricing;
+  show_in_playground: boolean;
+  update_at: string;
+}
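
To make the mapping between this raw shape and the chat model card concrete, here is a small standalone sketch that runs one hand-written entry through the same transformation the runtime applies. The sample values and the examplecorp model name are invented for illustration, and the LOBE_DEFAULT_MODEL_LIST lookup that decides `enabled` is omitted to keep the snippet self-contained.

// Only the fields the mapper reads are included here; see TogetherAIModel
// above for the full shape returned by https://api.together.xyz/api/models.
interface RawTogetherModel {
  context_length: number;
  description: string;
  display_name: string;
  display_type: string;
  name: string;
}

// Mirrors the map() step in src/libs/agent-runtime/togetherai/index.ts,
// minus the enabled/LOBE_DEFAULT_MODEL_LIST check.
const toChatModelCard = (model: RawTogetherModel) => ({
  description: model.description,
  displayName: model.display_name,
  functionCall: model.description?.includes('function calling'),
  id: model.name,
  maxOutput: model.context_length,
  tokens: model.context_length,
  vision: model.description?.includes('vision') || model.name?.includes('vision'),
});

// Hypothetical entry, not real Together AI data.
const sample: RawTogetherModel = {
  context_length: 32_768,
  description: 'An example chat model with function calling support.',
  display_name: 'Example Chat 7B',
  display_type: 'chat',
  name: 'examplecorp/example-chat-7b',
};

console.log(toChatModelCard(sample));
// -> { id: 'examplecorp/example-chat-7b', functionCall: true, vision: false, ... }
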

src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts

+7 -3

@@ -38,9 +38,11 @@ interface OpenAICompatibleFactoryOptions {
     bizError: ILobeAgentRuntimeErrorType;
     invalidAPIKey: ILobeAgentRuntimeErrorType;
   };
-  models?: {
-    transformModel?: (model: OpenAI.Model) => ChatModelCard;
-  };
+  models?:
+    | ((params: { apiKey: string }) => Promise<ChatModelCard[]>)
+    | {
+        transformModel?: (model: OpenAI.Model) => ChatModelCard;
+      };
   provider: string;
 }

@@ -122,6 +124,8 @@ export const LobeOpenAICompatibleFactory = ({
   }
 
   async models() {
+    if (typeof models === 'function') return models({ apiKey: this.client.apiKey });
+
     const list = await this.client.models.list();
 
     return list.data