From 03100c7b5ceda99d97f1b2f2dd1a5926ada86b7f Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 27 Feb 2024 14:40:41 -0500
Subject: [PATCH 1/7] fix(bun): fix Bun compatibility by forcing the gzip
 Accept-Encoding header:
 https://github.com/oven-sh/bun/issues/267#issuecomment-1854460357

---
 api/server/index.js | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/api/server/index.js b/api/server/index.js
index dd8a8ac7c2e..e9b46c8e32e 100644
--- a/api/server/index.js
+++ b/api/server/index.js
@@ -2,6 +2,7 @@ require('dotenv').config();
 const path = require('path');
 require('module-alias')({ base: path.resolve(__dirname, '..') });
 const cors = require('cors');
+const axios = require('axios');
 const express = require('express');
 const passport = require('passport');
 const mongoSanitize = require('express-mongo-sanitize');
@@ -22,6 +23,9 @@ const port = Number(PORT) || 3080;
 const host = HOST || 'localhost';
 
 const startServer = async () => {
+  if (typeof Bun !== 'undefined') {
+    axios.defaults.headers.common['Accept-Encoding'] = 'gzip';
+  }
   await connectDb();
   logger.info('Connected to MongoDB');
   await indexSync();

From 7de82f496d478f948ea58ce81d88b886facff1aa Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 27 Feb 2024 14:41:49 -0500
Subject: [PATCH 2/7] chore: update custom config examples

---
 docs/install/configuration/ai_endpoints.md  |  7 ++--
 docs/install/configuration/custom_config.md | 36 ++++++++-------------
 librechat.example.yaml                      | 17 +++++-----
 3 files changed, 25 insertions(+), 35 deletions(-)

diff --git a/docs/install/configuration/ai_endpoints.md b/docs/install/configuration/ai_endpoints.md
index 36294078879..e03a2800192 100644
--- a/docs/install/configuration/ai_endpoints.md
+++ b/docs/install/configuration/ai_endpoints.md
@@ -35,7 +35,6 @@ Some of the endpoints are marked as **Known,** which means they might have speci
       ]
       fetch: false
       titleConvo: true
-      titleMethod: "completion"
       titleModel: "mixtral-8x7b-32768"
       modelDisplayLabel: "groq"
       iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/groq.png"
@@ -64,7 +63,6 @@
       default: ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large-latest"]
       fetch: true
       titleConvo: true
-      titleMethod: "completion"
       titleModel: "mistral-tiny"
       modelDisplayLabel: "Mistral"
       # Drop Default params parameters from the request. See default params in guide linked below.
@@ -81,7 +79,7 @@
 
 - **Known:** icon provided, fetching list of models is recommended as API token rates and pricing used for token credit balances when models are fetched.
 
-- API may be strict for some models, and may not allow fields like `stop`, in which case, you should use [`dropParams`.](./custom_config.md#dropparams)
+- It's recommended, and for some models required, to use [`dropParams`](./custom_config.md#dropparams) to drop the `stop` parameter, as OpenRouter models use a variety of stop tokens.
 
 - Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
 
@@ -95,9 +93,10 @@
       default: ["gpt-3.5-turbo"]
       fetch: true
       titleConvo: true
-      titleMethod: "completion"
       titleModel: "gpt-3.5-turbo" # change to your preferred model
       modelDisplayLabel: "OpenRouter"
+      # Recommended: Drop the stop parameter from the request as OpenRouter models use a variety of stop tokens.
+ dropParams: ["stop"] ``` ![image](https://github.com/danny-avila/LibreChat/assets/110412045/c4a0415e-732c-46af-82a6-3598663b7f42) diff --git a/docs/install/configuration/custom_config.md b/docs/install/configuration/custom_config.md index 3b84e7086ff..dab98a68935 100644 --- a/docs/install/configuration/custom_config.md +++ b/docs/install/configuration/custom_config.md @@ -80,18 +80,18 @@ fileConfig: fileLimit: 5 fileSizeLimit: 10 # Maximum size for an individual file in MB totalSizeLimit: 50 # Maximum total size for all files in a single request in MB - supportedMimeTypes: - - "image/.*" - - "application/pdf" + # supportedMimeTypes: # In case you wish to limit certain filetypes + # - "image/.*" + # - "application/pdf" openAI: disabled: true # Disables file uploading to the OpenAI endpoint default: totalSizeLimit: 20 - YourCustomEndpointName: - fileLimit: 2 - fileSizeLimit: 5 + # YourCustomEndpointName: # Example for custom endpoints + # fileLimit: 2 + # fileSizeLimit: 5 serverFileSizeLimit: 100 # Global server file size limit in MB - avatarSizeLimit: 2 # Limit for user avatar image size in MB + avatarSizeLimit: 4 # Limit for user avatar image size in MB, default: 2 MB rateLimits: fileUploads: ipMax: 100 @@ -116,19 +116,15 @@ endpoints: apiKey: "${MISTRAL_API_KEY}" baseURL: "https://api.mistral.ai/v1" models: - default: ["mistral-tiny", "mistral-small", "mistral-medium"] + default: ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large-latest"] fetch: true # Attempt to dynamically fetch available models userIdQuery: false iconURL: "https://example.com/mistral-icon.png" titleConvo: true - titleMethod: "completion" titleModel: "mistral-tiny" - summarize: true - summaryModel: "mistral-summary" - forcePrompt: false modelDisplayLabel: "Mistral AI" - addParams: - safe_prompt: true + # addParams: + # safe_prompt: true # Mistral specific value for moderating messages dropParams: - "stop" - "user" @@ -144,10 +140,9 @@ endpoints: fetch: false titleConvo: true titleModel: "gpt-3.5-turbo" - summarize: false - forcePrompt: false modelDisplayLabel: "OpenRouter" dropParams: + - "stop" - "frequency_penalty" ``` @@ -521,15 +516,12 @@ endpoints: apiKey: "${YOUR_ENV_VAR_KEY}" baseURL: "https://api.mistral.ai/v1" models: - default: ["mistral-tiny", "mistral-small", "mistral-medium"] + default: ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large-latest"] titleConvo: true titleModel: "mistral-tiny" - summarize: false - summaryModel: "mistral-tiny" - forcePrompt: false modelDisplayLabel: "Mistral" - addParams: - safe_prompt: true + # addParams: + # safe_prompt: true # Mistral specific value for moderating messages # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error: dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"] ``` diff --git a/librechat.example.yaml b/librechat.example.yaml index cd2befdb96b..48b00ac772e 100644 --- a/librechat.example.yaml +++ b/librechat.example.yaml @@ -68,26 +68,26 @@ endpoints: titleConvo: true # Set to true to enable title conversation # Title Method: Choose between "completion" or "functions". - titleMethod: "completion" # Defaults to "completion" if omitted. + # titleMethod: "completion" # Defaults to "completion" if omitted. # Title Model: Specify the model to use for titles. titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted. # Summarize setting: Set to true to enable summarization. 
-    summarize: false
+    # summarize: false
 
     # Summary Model: Specify the model to use if summarization is enabled.
-    summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
+    # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
 
     # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
-    forcePrompt: false
+    # forcePrompt: false
 
     # The label displayed for the AI model in messages.
     modelDisplayLabel: "Mistral" # Default is "AI" when not set.
 
     # Add additional parameters to the request. Default params will be overwritten.
-    addParams:
-      safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
+    # addParams:
+    #   safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
 
     # Drop Default params parameters from the request. See default params in guide linked below.
     # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
@@ -105,9 +105,8 @@ endpoints:
       fetch: true
       titleConvo: true
       titleModel: "gpt-3.5-turbo"
-      summarize: false
-      summaryModel: "gpt-3.5-turbo"
-      forcePrompt: false
+      # Recommended: Drop the stop parameter from the request as OpenRouter models use a variety of stop tokens.
+      dropParams: ["stop"]
       modelDisplayLabel: "OpenRouter"
 
 # See the Custom Configuration Guide for more information:

From eb7f407100984e5333644a1186f6eae685c5ac58 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 27 Feb 2024 14:44:48 -0500
Subject: [PATCH 3/7] fix(OpenAIClient.chatCompletion): remove redundant
 stream.controller.abort() call; `break` already aborts the request, and
 skipping the extra call prevents abort errors

---
 api/app/clients/OpenAIClient.js | 1 -
 1 file changed, 1 deletion(-)

diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index 7ddf7ec2f24..0e5024832fc 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -1154,7 +1154,6 @@ ${convo}
       intermediateReply += token;
       onProgress(token);
       if (abortController.signal.aborted) {
-        stream.controller.abort();
         break;
       }
     }

From c94acd11f7db01af9052bdcdc993e8785c96672a Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 27 Feb 2024 14:46:14 -0500
Subject: [PATCH 4/7] chore: bump bun.lockb

---
 bun.lockb | Bin 884469 -> 884469 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/bun.lockb b/bun.lockb
index 63642a5aa245f58c5e5e2fbb28a2ecc687db5b6e..ad0a321f652b67be36d93aae02c533b0d6624457 100755
GIT binary patch
delta 129
zcmezR*YxXO(}ott7N!>F7M2#)7Pc+yDlasd85kJkfOr!JkN|>bYzz#HKw2J%w?M_7
zgT%J`zF?2mVKd}oU@+#}zD|VWl^|yTkP`;PX4@C4amed1xv{n%3gQ4_P9Ww2Vs0Sj
L*?uU9*H{Mt0{tL}

delta 129
zcmezR*YxXO(}ott7N!>F7M2#)7Pc+yDlasd7#JAjfOr!JkN|?GYzz#HKw2J%w?M_7
z0mYcM`@Ue0)?pLjWnd8F-M&tQ
Date: Tue, 27 Feb 2024 17:18:33 -0500
Subject: [PATCH 5/7] fix: remove result-thinking class when message is no
 longer streaming

---
 client/src/components/Chat/Messages/Content/Markdown.tsx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/client/src/components/Chat/Messages/Content/Markdown.tsx b/client/src/components/Chat/Messages/Content/Markdown.tsx
index 1a80449ba8e..12608ee87e6 100644
--- a/client/src/components/Chat/Messages/Content/Markdown.tsx
+++ b/client/src/components/Chat/Messages/Content/Markdown.tsx
@@ -10,7 +10,7 @@ import rehypeHighlight from 'rehype-highlight';
 import type { TMessage } from 'librechat-data-provider';
 import type { PluggableList } from 'unified';
 import CodeBlock from '~/components/Messages/Content/CodeBlock';
-import { langSubset, validateIframe, processLaTeX } from '~/utils';
+import { cn, langSubset, validateIframe, processLaTeX } from '~/utils';
 import { useChatContext } from '~/Providers';
 import store from '~/store';
 
@@ -75,7 +75,7 @@ const Markdown = memo(({ content, message, showCursor }: TContentProps) => {
   return (
     <ReactMarkdown
-      className="markdown prose dark:prose-invert light my-1 w-full break-words result-thinking"
+      className={cn('markdown prose dark:prose-invert light my-1 w-full break-words', isSubmitting ? 'result-thinking' : '')}
     >
       {content}
     </ReactMarkdown>
); From 34111d0f73616350759b388ec16c61773c960bb1 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 27 Feb 2024 17:20:09 -0500 Subject: [PATCH 6/7] fix(bun): improve Bun support by forcing use of old method in bun env, also update old methods with new customizable params --- api/app/clients/ChatGPTClient.js | 157 ++++++++++++++++++++++++++----- api/app/clients/OpenAIClient.js | 3 +- package.json | 1 + 3 files changed, 139 insertions(+), 22 deletions(-) diff --git a/api/app/clients/ChatGPTClient.js b/api/app/clients/ChatGPTClient.js index c1ae54fdf08..0dd74f180bd 100644 --- a/api/app/clients/ChatGPTClient.js +++ b/api/app/clients/ChatGPTClient.js @@ -1,9 +1,16 @@ -const crypto = require('crypto'); const Keyv = require('keyv'); +const crypto = require('crypto'); +const { + EModelEndpoint, + resolveHeaders, + mapModelToAzureConfig, +} = require('librechat-data-provider'); const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken'); const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source'); const { Agent, ProxyAgent } = require('undici'); const BaseClient = require('./BaseClient'); +const { logger } = require('~/config'); +const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils'); const CHATGPT_MODEL = 'gpt-3.5-turbo'; const tokenizersCache = {}; @@ -144,7 +151,8 @@ class ChatGPTClient extends BaseClient { if (!abortController) { abortController = new AbortController(); } - const modelOptions = { ...this.modelOptions }; + + let modelOptions = { ...this.modelOptions }; if (typeof onProgress === 'function') { modelOptions.stream = true; } @@ -159,56 +167,165 @@ class ChatGPTClient extends BaseClient { } const { debug } = this.options; - const url = this.completionsUrl; + let baseURL = this.completionsUrl; if (debug) { console.debug(); - console.debug(url); + console.debug(baseURL); console.debug(modelOptions); console.debug(); } - if (this.azure || this.options.azure) { - // Azure does not accept `model` in the body, so we need to remove it. 
-      delete modelOptions.model;
-    }
-
     const opts = {
       method: 'POST',
       headers: {
         'Content-Type': 'application/json',
       },
-      body: JSON.stringify(modelOptions),
       dispatcher: new Agent({
         bodyTimeout: 0,
         headersTimeout: 0,
       }),
     };
 
-    if (this.apiKey && this.options.azure) {
-      opts.headers['api-key'] = this.apiKey;
+    if (this.isVisionModel) {
+      modelOptions.max_tokens = 4000;
+    }
+
+    /** @type {TAzureConfig | undefined} */
+    const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
+
+    const isAzure = this.azure || this.options.azure;
+    if (
+      (isAzure && this.isVisionModel && azureConfig) ||
+      (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
+    ) {
+      const { modelGroupMap, groupMap } = azureConfig;
+      const {
+        azureOptions,
+        baseURL,
+        headers = {},
+        serverless,
+      } = mapModelToAzureConfig({
+        modelName: modelOptions.model,
+        modelGroupMap,
+        groupMap,
+      });
+      opts.headers = resolveHeaders(headers);
+      this.langchainProxy = extractBaseURL(baseURL);
+      this.apiKey = azureOptions.azureOpenAIApiKey;
+
+      const groupName = modelGroupMap[modelOptions.model].group;
+      this.options.addParams = azureConfig.groupMap[groupName].addParams;
+      this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
+      // Note: `forcePrompt` not re-assigned as only chat models are vision models
+
+      this.azure = !serverless && azureOptions;
+      this.azureEndpoint =
+        !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
+    }
+
+    if (this.options.headers) {
+      opts.headers = { ...opts.headers, ...this.options.headers };
+    }
+
+    if (isAzure) {
+      // Azure does not accept `model` in the body, so we need to remove it.
+      delete modelOptions.model;
+
+      baseURL = this.langchainProxy
+        ? constructAzureURL({
+            baseURL: this.langchainProxy,
+            azure: this.azure,
+          })
+        : this.azureEndpoint.split(/\/(chat|completion)/)[0];
+
+      if (this.options.forcePrompt) {
+        baseURL += '/completions';
+      } else {
+        baseURL += '/chat/completions';
+      }
+
+      opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
+      opts.headers = { ...opts.headers, 'api-key': this.apiKey };
     } else if (this.apiKey) {
       opts.headers.Authorization = `Bearer ${this.apiKey}`;
     }
 
+    if (process.env.OPENAI_ORGANIZATION) {
+      opts.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
+    }
+
     if (this.useOpenRouter) {
       opts.headers['HTTP-Referer'] = 'https://librechat.ai';
       opts.headers['X-Title'] = 'LibreChat';
     }
 
-    if (this.options.headers) {
-      opts.headers = { ...opts.headers, ...this.options.headers };
-    }
-
     if (this.options.proxy) {
       opts.dispatcher = new ProxyAgent(this.options.proxy);
     }
 
+    /* hacky fixes for Mistral AI API:
+      - Re-orders system message to the top of the messages payload, as not allowed anywhere else
+      - If there is only one message and it's a system message, change the role to user
+    */
+    if (baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
+      const { messages } = modelOptions;
+
+      const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
+
+      if (systemMessageIndex > 0) {
+        const [systemMessage] = messages.splice(systemMessageIndex, 1);
+        messages.unshift(systemMessage);
+      }
+
+      modelOptions.messages = messages;
+
+      if (messages.length === 1 && messages[0].role === 'system') {
+        modelOptions.messages[0].role = 'user';
+      }
+    }
+
+    if (this.options.addParams && typeof this.options.addParams === 'object') {
+      modelOptions = {
+        ...modelOptions,
+        ...this.options.addParams,
+      };
+      logger.debug('[ChatGPTClient] chatCompletion: added params', {
+        addParams: this.options.addParams,
+        modelOptions,
+      });
+    }
+
+    if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
+      this.options.dropParams.forEach((param) => {
+        delete modelOptions[param];
+      });
+      logger.debug('[ChatGPTClient] chatCompletion: dropped params', {
+        dropParams: this.options.dropParams,
+        modelOptions,
+      });
+    }
+
+    if (baseURL.includes('v1') && !baseURL.includes('/chat/completions')) {
+      baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
+    }
+
+    const BASE_URL = new URL(baseURL);
+    if (opts.defaultQuery) {
+      Object.entries(opts.defaultQuery).forEach(([key, value]) => {
+        BASE_URL.searchParams.append(key, value);
+      });
+      delete opts.defaultQuery;
+    }
+
+    const completionsURL = BASE_URL.toString();
+    opts.body = JSON.stringify(modelOptions);
+
     if (modelOptions.stream) {
       // eslint-disable-next-line no-async-promise-executor
       return new Promise(async (resolve, reject) => {
         try {
           let done = false;
-          await fetchEventSource(url, {
+          await fetchEventSource(completionsURL, {
             ...opts,
             signal: abortController.signal,
             async onopen(response) {
@@ -236,7 +353,6 @@
               // workaround for private API not sending [DONE] event
               if (!done) {
                 onProgress('[DONE]');
-                abortController.abort();
                 resolve();
               }
             },
@@ -249,14 +365,13 @@
             },
             onmessage(message) {
               if (debug) {
-                // console.debug(message);
+                console.debug(message);
               }
               if (!message.data || message.event === 'ping') {
                 return;
               }
               if (message.data === '[DONE]') {
                 onProgress('[DONE]');
-                abortController.abort();
                 resolve();
                 done = true;
                 return;
@@ -269,7 +384,7 @@
         }
       });
     }
-    const response = await fetch(url, {
+    const response = await fetch(completionsURL, {
       ...opts,
       signal: abortController.signal,
     });
diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index 0e5024832fc..34cd9db0620 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -560,7 +560,7 @@ class OpenAIClient extends BaseClient {
     let streamResult = null;
     this.modelOptions.user = this.user;
     const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null;
-    const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion);
+    const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion || typeof Bun !== 'undefined');
     if (typeof opts.onProgress === 'function' && useOldMethod) {
       await this.getCompletion(
         payload,
@@ -1154,6 +1154,7 @@ ${convo}
         intermediateReply += token;
         onProgress(token);
         if (abortController.signal.aborted) {
+          stream.controller.abort();
           break;
         }
       }
diff --git a/package.json b/package.json
index cc3e4db2cba..ff6100751a2 100644
--- a/package.json
+++ b/package.json
@@ -49,6 +49,7 @@
     "lint": "eslint \"{,!(node_modules)/**/}*.{js,jsx,ts,tsx}\"",
     "format": "prettier-eslint --write \"{,!(node_modules)/**/}*.{js,jsx,ts,tsx}\"",
     "b:api": "NODE_ENV=production bun run api/server/index.js",
+    "b:api-inspect": "NODE_ENV=production bun --inspect run api/server/index.js",
     "b:api:dev": "NODE_ENV=production bun run --watch api/server/index.js",
     "b:data": "cd packages/data-provider && bun run b:build",
     "b:client": "bun --bun run b:data && cd client && bun --bun run b:build",

From cb8ea71920045e404a2e31bf4fdf747650d4d0cf Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 27 Feb 2024 17:47:51 -0500
Subject: [PATCH 7/7] fix(ci): pass tests

---
 api/app/clients/ChatGPTClient.js | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/api/app/clients/ChatGPTClient.js b/api/app/clients/ChatGPTClient.js
index 0dd74f180bd..a5ed43985e2 100644
--- a/api/app/clients/ChatGPTClient.js
+++ b/api/app/clients/ChatGPTClient.js
@@ -305,7 +305,13 @@ class ChatGPTClient extends BaseClient {
       });
     }
 
-    if (baseURL.includes('v1') && !baseURL.includes('/chat/completions')) {
+    if (baseURL.includes('v1') && !baseURL.includes('/completions') && !this.isChatCompletion) {
+      baseURL = baseURL.split('v1')[0] + 'v1/completions';
+    } else if (
+      baseURL.includes('v1') &&
+      !baseURL.includes('/chat/completions') &&
+      this.isChatCompletion
+    ) {
       baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
     }
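
Note on the series (not part of the patches themselves): the Bun workarounds in PATCH 1 and PATCH 6 both rely on the fact that the Bun runtime exposes a global `Bun` object, so `typeof Bun !== 'undefined'` is a cheap runtime check; with PATCH 1 applied, every axios request from the API server advertises only `Accept-Encoding: gzip` under Bun, sidestepping the response-decoding bug tracked in oven-sh/bun#267. PATCH 7 then refines the baseURL normalization in ChatGPTClient.js so that legacy text-completion models are routed to `/v1/completions` instead of `/v1/chat/completions`. A minimal sketch of that branching, extracted into a standalone helper for illustration (the function name and standalone form are ours, not part of the patch):

```js
// Mirrors the PATCH 7 branching in ChatGPTClient.js; `isChatCompletion`
// stands in for the client flag of the same name.
function normalizeCompletionsURL(baseURL, isChatCompletion) {
  if (baseURL.includes('v1') && !baseURL.includes('/completions') && !isChatCompletion) {
    // Legacy text-completion models use the plain completions route.
    return baseURL.split('v1')[0] + 'v1/completions';
  }
  if (baseURL.includes('v1') && !baseURL.includes('/chat/completions') && isChatCompletion) {
    // Chat models use the chat completions route.
    return baseURL.split('v1')[0] + 'v1/chat/completions';
  }
  return baseURL;
}

console.log(normalizeCompletionsURL('https://api.mistral.ai/v1', true));
// -> 'https://api.mistral.ai/v1/chat/completions'
console.log(normalizeCompletionsURL('https://example.com/api/v1', false));
// -> 'https://example.com/api/v1/completions'
```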