From e9e109f709bce05b2cc58281761a3abdfed9f41b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=B6rd=C5=91s=20Andr=C3=A1s?= Date: Mon, 27 Jan 2025 23:55:50 +0100 Subject: [PATCH 1/2] Proposal init --- independent-publisher-connectors/Deepseek/readme.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 independent-publisher-connectors/Deepseek/readme.md diff --git a/independent-publisher-connectors/Deepseek/readme.md b/independent-publisher-connectors/Deepseek/readme.md new file mode 100644 index 0000000000..de1a01f66b --- /dev/null +++ b/independent-publisher-connectors/Deepseek/readme.md @@ -0,0 +1,7 @@ +# Deepseek + +Deepseek is providing various advanced LLMs used for tasks such as maths, coding and natural language reasoning. + +## Publisher: Fördős András + +*NOTE: This is a proposal, work in progress* \ No newline at end of file From c0ff5af0f03635b3549c2f43ce36e7368b5199ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=B6rd=C5=91s=20Andr=C3=A1s?= Date: Mon, 17 Feb 2025 00:19:45 +0100 Subject: [PATCH 2/2] Add deepseek --- .../Deepseek/apiDefinition.swagger.json | 394 ++++++++++++++++++ .../Deepseek/apiProperties.json | 24 ++ .../Deepseek/readme.md | 19 +- .../Deepseek/script.csx | 22 + 4 files changed, 458 insertions(+), 1 deletion(-) create mode 100644 independent-publisher-connectors/Deepseek/apiDefinition.swagger.json create mode 100644 independent-publisher-connectors/Deepseek/apiProperties.json create mode 100644 independent-publisher-connectors/Deepseek/script.csx diff --git a/independent-publisher-connectors/Deepseek/apiDefinition.swagger.json b/independent-publisher-connectors/Deepseek/apiDefinition.swagger.json new file mode 100644 index 0000000000..c002dc8637 --- /dev/null +++ b/independent-publisher-connectors/Deepseek/apiDefinition.swagger.json @@ -0,0 +1,394 @@ +{ + "swagger": "2.0", + "info": { + "title": "Default title", + "description": "Deepseek is providing various advanced LLMs used for tasks such as maths, coding and natural 
language reasoning.", + "version": "1.0", + "contact": { + "name": "Fördős András", + "email": "fordosa90+ipc_deep@gmail.com" + } + }, + "host": "api.deepseek.com", + "basePath": "/", + "schemes": [ + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [], + "paths": { + "/chat/completions": { + "post": { + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "A unique identifier for the chat completion.", + "title": "ID" + }, + "choices": { + "type": "array", + "items": { + "type": "object", + "properties": { + "finish_reason": { + "type": "string", + "description": "The reason the model stopped generating tokens.", + "title": "Finish reason" + }, + "index": { + "type": "integer", + "format": "int32", + "description": "The index of the choice in the list of choices.", + "title": "Index" + }, + "message": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The contents of the message.", + "title": "Content" + }, + "role": { + "type": "string", + "description": "The role of the author of this message.", + "title": "Role" + }, + "reasoning_content": { + "type": "string", + "description": "The reasoning contents of the assistant message, before the final answer.", + "title": "Reasoning content" + } + }, + "description": "A chat completion message generated by the model.", + "title": "Message" + }, + "logprobs": { + "type": "object", + "description": "Log probability information for the choice.", + "title": "Log probability" + } + } + }, + "description": "A list of chat completion choices.", + "title": "Choices" + }, + "created": { + "type": "integer", + "format": "int32", + "description": "The Unix timestamp (in seconds) of when the chat completion was created.", + "title": "Created" + }, + "model": { + "type": "string", + "description": "The model used for the chat completion.", + "title": "Model" + }, + 
"system_fingerprint": { + "type": "string", + "description": "This fingerprint represents the backend configuration that the model runs with.", + "title": "System fingerprint" + }, + "object": { + "type": "string", + "description": "The object type.", + "title": "Object" + }, + "usage": { + "type": "object", + "properties": { + "completion_tokens": { + "type": "integer", + "format": "int32", + "description": "Number of tokens in the generated completion.", + "title": "Tokens (completion)" + }, + "prompt_tokens": { + "type": "integer", + "format": "int32", + "description": "Number of tokens in the prompt. It equals prompt_cache_hit_tokens + prompt_cache_miss_tokens.", + "title": "Tokens (prompt)" + }, + "total_tokens": { + "type": "integer", + "format": "int32", + "description": "Total number of tokens used in the request (prompt + completion).", + "title": "Tokens (total)" + }, + "prompt_cache_hit_tokens": { + "type": "integer", + "format": "int32", + "description": "Number of tokens in the prompt that hits the context cache.", + "title": "Tokens (prompt cache hit)" + }, + "prompt_cache_miss_tokens": { + "type": "integer", + "format": "int32", + "description": "Number of tokens in the prompt that misses the context cache.", + "title": "Tokens (prompt cache miss)" + }, + "completion_tokens_details": { + "type": "object", + "description": "Breakdown of tokens used in a completion.", + "title": "Token details", + "properties": { + "reasoning_tokens": { + "type": "integer", + "description": "Tokens generated by the model for reasoning.", + "title": "Reasoning tokens", + "format": "int32" + } + } + } + }, + "description": "Usage statistics for the completion request.", + "title": "Usage" + } + } + } + } + }, + "description": "Creates a model response for the given chat conversation.", + "summary": "Chat Completion", + "operationId": "ChatCompletion", + "parameters": [ + { + "name": "body", + "in": "body", + "required": false, + "schema": { + "type": "object", + 
"properties": { + "messages": { + "type": "array", + "items": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The contents of the system message.", + "title": "Content" + }, + "role": { + "type": "string", + "description": "The role of the messages author, eg 'system'.", + "title": "Role" + }, + "name": { + "type": "string", + "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", + "title": "Name" + } + } + }, + "description": "A list of messages comprising the conversation so far.", + "title": "Messages" + }, + "model": { + "type": "string", + "description": "ID of the model to use. You can use deepseek-chat.", + "title": "Model" + }, + "frequency_penalty": { + "type": "integer", + "format": "int32", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", + "title": "Penalty (frequency)" + }, + "max_tokens": { + "type": "integer", + "format": "int32", + "description": "Integer between 1 and 8192. The maximum number of tokens that can be generated in the chat completion, defaulted to 4096.", + "title": "Max tokens" + }, + "presence_penalty": { + "type": "integer", + "format": "int32", + "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", + "title": "Penalty (presence)" + }, + "response_format": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Type of response.", + "title": "Type", + "enum": [ + "text", + "json_object" + ] + } + }, + "description": "An object specifying the format that the model must output.", + "title": "Response format" + }, + "temperature": { + "type": "integer", + "format": "int32", + "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.", + "title": "Temperature" + }, + "top_p": { + "type": "integer", + "format": "int32", + "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.", + "title": "Top_p" + }, + "logprobs": { + "type": "boolean", + "description": "Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of each output token returned in the content of message.", + "title": "Log probabilities" + }, + "top_logprobs": { + "type": "string", + "description": "An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.", + "title": "Top logprobs" + } + } + } + } + ] + } + }, + "/user/balance": { + "get": { + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "is_available": { + "type": "boolean", + "description": "Whether the user's balance is sufficient for API calls.", + "title": "Is available" + }, + "balance_infos": { + "type": "array", + "items": { + "type": "object", + "properties": { + "currency": { + "type": "string", + "description": "The currency of the balance.", + "title": "Currency" + }, + "total_balance": { + "type": "string", + "description": "The total available balance, including the granted balance and the topped-up balance.", + "title": "Balance (total)" + }, + "granted_balance": { + "type": "string", + "description": "The total not expired granted balance.", + "title": "Balance (granted)" + }, + "topped_up_balance": { + "type": "string", + "description": "The total topped-up balance.", + "title": "Balance (topped up))" + } + } + }, + "description": "Details of balance.", + "title": "Balance info" + } + } + } + } + }, + "summary": "Get Balance", + "operationId": "GetBalance", + "description": "Get user current balance.", + "parameters": [] + } + }, + "/models": { + "get": { + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "properties": { + "object": { + "type": "string", + "description": "Object.", + "title": "Object", + "x-ms-visibility": "internal" + }, + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The model identifier, which can be referenced in the API 
endpoints.", + "title": "ID" + }, + "object": { + "type": "string", + "description": "The object type, which is always \"model\".", + "title": "Object" + }, + "owned_by": { + "type": "string", + "description": "The organization that owns the model.", + "title": "Owned by" + } + } + }, + "description": "Array of detailed data.", + "title": "Data" + } + } + } + } + }, + "summary": "List Models", + "operationId": "ListModels", + "description": "Lists the currently available models, and provides basic information about each one such as the owner and availability.", + "parameters": [] + } + } + }, + "definitions": {}, + "parameters": {}, + "responses": {}, + "securityDefinitions": { + "api_key": { + "type": "apiKey", + "in": "header", + "name": "api_key" + } + }, + "security": [ + { + "api_key": [] + } + ], + "tags": [], + "x-ms-connector-metadata": [ + { + "propertyName": "Website", + "propertyValue": "https://platform.deepseek.com/" + }, + { + "propertyName": "Privacy policy", + "propertyValue": "https://cdn.deepseek.com/policies/en-US/deepseek-privacy-policy.html" + }, + { + "propertyName": "Categories", + "propertyValue": "AI" + } + ] +} \ No newline at end of file diff --git a/independent-publisher-connectors/Deepseek/apiProperties.json b/independent-publisher-connectors/Deepseek/apiProperties.json new file mode 100644 index 0000000000..678978ddc3 --- /dev/null +++ b/independent-publisher-connectors/Deepseek/apiProperties.json @@ -0,0 +1,24 @@ +{ + "properties": { + "connectionParameters": { + "api_key": { + "type": "securestring", + "uiDefinition": { + "displayName": "API Key", + "description": "The API Key for this api", + "tooltip": "Provide your API Key", + "constraints": { + "tabIndex": 2, + "clearText": false, + "required": "true" + } + } + } + }, + "iconBrandColor": "#da3b01", + "capabilities": [], + "policyTemplateInstances": [], + "publisher": "Fördős András", + "stackOwner": "Hangzhou DeepSeek Artificial Intelligence Co., Ltd." 
+  }
+}
\ No newline at end of file
diff --git a/independent-publisher-connectors/Deepseek/readme.md b/independent-publisher-connectors/Deepseek/readme.md
index de1a01f66b..9fefe53640 100644
--- a/independent-publisher-connectors/Deepseek/readme.md
+++ b/independent-publisher-connectors/Deepseek/readme.md
@@ -4,4 +4,21 @@ Deepseek is providing various advanced LLMs used for tasks such as maths, coding
 
 ## Publisher: Fördős András
 
-*NOTE: This is a proposal, work in progress*
\ No newline at end of file
+## Prerequisites
+
+Register for a Deepseek account on [https://platform.deepseek.com/](https://platform.deepseek.com/) so that you can generate an API Key.
+
+## Supported Operations
+
+### Chat completion
+Creates a model response for the given chat conversation.
+
+### Get balance
+Get user current balance.
+
+### List models
+Lists the currently available models, and provides basic information about each one such as the owner and availability.
+
+## Known Issues and Limitations
+
+There are currently no known limitations of the connector itself, however the underlying service might pose a few. Please make sure to read through its documentation. If something is missing from the connector, please get in touch.
\ No newline at end of file
diff --git a/independent-publisher-connectors/Deepseek/script.csx b/independent-publisher-connectors/Deepseek/script.csx
new file mode 100644
index 0000000000..6808f8fcdc
--- /dev/null
+++ b/independent-publisher-connectors/Deepseek/script.csx
@@ -0,0 +1,31 @@
+public class Script : ScriptBase
+{
+    // Entry point invoked by the connector runtime for every request:
+    // swap the connection's API key header for a standard Bearer token,
+    // then forward the request to the Deepseek API unchanged.
+    public override async Task<HttpResponseMessage> ExecuteAsync()
+    {
+        AddBearerPrefix();
+
+        var response = await Context.SendAsync(Context.Request, CancellationToken)
+            .ConfigureAwait(continueOnCapturedContext: false);
+
+        return response;
+    }
+
+    // Deepseek expects "Authorization: Bearer <key>", but the connection
+    // parameter arrives as a bare "api_key" header. Move the value across,
+    // removing both headers first so the prefix is never applied twice.
+    private void AddBearerPrefix()
+    {
+        // TryGetValues avoids the InvalidOperationException that GetValues
+        // throws when the header is missing.
+        Context.Request.Headers.TryGetValues("api_key", out var headerValues);
+        var accessToken = headerValues?.FirstOrDefault();
+
+        Context.Request.Headers.Remove("api_key");
+        Context.Request.Headers.Remove("Authorization");
+
+        Context.Request.Headers.TryAddWithoutValidation("Authorization", "Bearer " + accessToken);
+    }
+}
\ No newline at end of file