@@ -11,60 +11,78 @@ This guide focuses primarily on configuring and using various LLM clients suppor
11
11
## OpenAI GPT-4 Client Setup
12
12
13
13
``` python
14
- import giskard
15
14
import os
15
+ import giskard
16
16
from giskard.llm.client.openai import OpenAIClient
17
17
18
+ # Set the OpenAI API key
18
19
os.environ[" OPENAI_API_KEY" ] = " sk-…"
19
20
21
+ # Create a giskard OpenAI client
22
+ openai_client = OpenAIClient(model = " gpt-4o" )
23
+
24
+ # Set the default client
20
25
giskard.llm.set_llm_api(" openai" )
21
- oc = OpenAIClient(model = " gpt-4-turbo-preview" )
22
- giskard.llm.set_default_client(oc)
26
+ giskard.llm.set_default_client(openai_client)
23
27
```
24
28
25
29
## Azure OpenAI Client Setup
26
30
27
31
``` python
28
32
import os
29
- from giskard.llm import set_llm_model
30
- from giskard.llm.embeddings.openai import set_embedding_model
33
+ import giskard
31
34
35
+ # Set the Azure OpenAI API key and endpoint
32
36
os.environ[' AZURE_OPENAI_API_KEY' ] = ' ...'
33
37
os.environ[' AZURE_OPENAI_ENDPOINT' ] = ' https://xxx.openai.azure.com'
34
38
os.environ[' OPENAI_API_VERSION' ] = ' 2023-07-01-preview'
35
39
36
-
37
40
# You'll need to provide the name of the model that you've deployed
38
41
# Beware, the model provided must be capable of using function calls
39
- set_llm_model(' my-gpt-4-model' )
40
- set_embedding_model(' my-embedding-model' ) # Optional
42
+ giskard.llm.set_llm_model(' my-gpt-4-model' )
43
+ giskard.llm.embeddings.set_embedding_model(' my-embedding-model' )
41
44
```
42
45
43
46
## Mistral Client Setup
44
47
45
48
``` python
46
49
import os
50
+ import giskard
47
51
from giskard.llm.client.mistral import MistralClient
48
52
49
- os.environ[" MISTRAL_API_KEY" ] = " sk-…"
53
+ # Set the Mistral API key
54
+ os.environ[" MISTRAL_API_KEY" ] = " …"
50
55
51
- mc = MistralClient()
52
- giskard.llm.set_default_client(mc)
56
+ # Create a giskard Mistral client
57
+ mistral_client = MistralClient()
58
+
59
+ # Set the default client
60
+ giskard.llm.set_default_client(mistral_client)
61
+
62
+ # You may also want to set the default embedding model
63
+ # Check the Custom Client Setup section for more details
53
64
```
54
65
55
66
## Ollama Client Setup
56
67
57
68
The Ollama setup involves configuring an OpenAI client customized for the Ollama API:
58
69
59
70
``` python
71
+ import giskard
60
72
from openai import OpenAI
61
73
from giskard.llm.client.openai import OpenAIClient
62
- from giskard.llm.client.mistral import MistralClient
74
+ from giskard.llm.embeddings.openai import OpenAIEmbedding
63
75
64
- # Setup the Ollama client with API key and base URL
76
+ # Setup the OpenAI client with API key and base URL for Ollama
65
77
_client = OpenAI(base_url = " http://localhost:11434/v1/" , api_key = " ollama" )
66
- oc = OpenAIClient(model = " gemma:2b" , client = _client)
67
- giskard.llm.set_default_client(oc)
78
+
79
+ # Wrap the original OpenAI client with giskard OpenAI client and embedding
80
+ llm_client = OpenAIClient(model = " llama3.2" , client = _client)
81
+ embed_client = OpenAIEmbedding(model = " nomic-embed-text" , client = _client)
82
+
83
+ # Set the default client and embedding
84
+ giskard.llm.set_default_client(llm_client)
85
+ giskard.llm.embeddings.set_default_embedding(embed_client)
68
86
```
69
87
70
88
## Claude 3 Client Setup
@@ -78,28 +96,41 @@ import giskard
78
96
79
97
from giskard.llm.client.bedrock import ClaudeBedrockClient
80
98
from giskard.llm.embeddings.bedrock import BedrockEmbedding
81
- from giskard.llm.embeddings import set_default_embedding
82
99
100
+ # Create a Bedrock client
83
101
bedrock_runtime = boto3.client(" bedrock-runtime" , region_name = os.environ[" AWS_DEFAULT_REGION" ])
102
+
103
+ # Wrap the Bedrock client with giskard Bedrock client and embedding
84
104
claude_client = ClaudeBedrockClient(bedrock_runtime, model = " anthropic.claude-3-haiku-20240307-v1:0" )
85
105
embed_client = BedrockEmbedding(bedrock_runtime, model = " amazon.titan-embed-text-v1" )
106
+
107
+ # Set the default client and embedding
86
108
giskard.llm.set_default_client(claude_client)
87
- set_default_embedding(embed_client)
109
+ giskard.llm.embeddings.set_default_embedding(embed_client)
88
110
```
89
111
90
112
## Gemini Client Setup
91
113
92
114
``` python
93
115
import os
94
116
import giskard
95
-
96
117
import google.generativeai as genai
97
-
98
118
from giskard.llm.client.gemini import GeminiClient
99
119
120
+ # Set the Gemini API key
121
+ os.environ[" GEMINI_API_KEY" ] = " …"
122
+
123
+ # Configure the Gemini API
100
124
genai.configure(api_key = os.environ[" GEMINI_API_KEY" ])
101
125
102
- giskard.llm.set_default_client(GeminiClient())
126
+ # Create a giskard Gemini client
127
+ gemini_client = GeminiClient()
128
+
129
+ # Set the default client
130
+ giskard.llm.set_default_client(gemini_client)
131
+
132
+ # You may also want to set the default embedding model
133
+ # Check the Custom Client Setup section for more details
103
134
```
104
135
105
136
## Custom Client Setup
@@ -110,7 +141,7 @@ from typing import Sequence, Optional
110
141
from giskard.llm.client import set_default_client
111
142
from giskard.llm.client.base import LLMClient, ChatMessage
112
143
113
-
144
+ # Create a custom client by extending the LLMClient class
114
145
class MyLLMClient (LLMClient ):
115
146
def __init__ (self , my_client ):
116
147
self ._client = my_client
@@ -155,7 +186,17 @@ class MyLLMClient(LLMClient):
155
186
156
187
return ChatMessage(role = " assistant" , message = data[" completion" ])
157
188
158
- set_default_client(MyLLMClient())
189
+ # Create an instance of the custom client
190
+ llm_client = MyLLMClient()
191
+
192
+ # Set the default client
193
+ set_default_client(llm_client)
194
+
195
+ # It's also possible to create a custom embedding class extending BaseEmbedding
196
+ # Or you can use FastEmbed for a pre-built embedding model:
197
+ from giskard.llm.embeddings.fastembed import try_get_fastembed_embeddings
198
+ embed_client = try_get_fastembed_embeddings()
199
+ giskard.llm.embeddings.set_default_embedding(embed_client)
159
200
```
160
201
161
202
If you run into any issues configuring the LLM client, don't hesitate to [ask us on Discord](https://discord.com/invite/ABvfpbu69R) or open a new issue on [our GitHub repo](https://github.com/Giskard-AI/giskard).
0 commit comments