refactor: Use librechat-data-provider app-wide 🔄 (#1326)

* chore: bump vite and @vitejs/plugin-react, mark the client package as ESM, and move react-query to a peer dependency in data-provider

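Moving react-query to a peer dependency means data-provider links against the client's copy instead of bundling its own, and server-side consumers never pull it in. A minimal sketch of the `packages/data-provider/package.json` change (the version range is illustrative, not the commit's exact pin):

```json
{
  "name": "librechat-data-provider",
  "peerDependencies": {
    "@tanstack/react-query": "^4.0.0"
  }
}
```
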
* chore: update imports for the new data-provider export strategy; also fix type imports where applicable

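On the client this mostly means swapping import paths and keeping type-only imports separate; a sketch of the pattern, with the hook and type names chosen for illustration rather than taken from the commit:

```ts
// hooks now come from the dedicated react-query subpath export
import { useGetModelsQuery } from 'librechat-data-provider/react-query';
// type-only imports stay on the main entry, which carries no React dependency
import type { TConversation } from 'librechat-data-provider';
```
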
* chore: export react-query services separately to avoid React dependencies in /api/

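The split is implemented with a package.json `exports` map, so the React-bound hooks live behind a subpath that server code never resolves. A sketch of the shape, with the dist file names assumed rather than taken from the commit:

```json
{
  "exports": {
    ".": {
      "require": "./dist/index.js",
      "import": "./dist/index.es.js"
    },
    "./react-query": {
      "require": "./dist/react-query/index.js",
      "import": "./dist/react-query/index.es.js"
    }
  }
}
```

With a map like this, `require('librechat-data-provider')` in /api/ resolves to plain data utilities, while the client imports its hooks from `librechat-data-provider/react-query`.
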
* chore: suppress sourcemap warnings and polyfill `node:path`, which is used by filenamify
TODO: replace filenamify with an alternative and REMOVE the polyfill

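A sketch of the vite config shape this implies; the `path-browserify` alias and the warning filter are assumptions, not the commit's exact code:

```ts
import { defineConfig } from 'vite';

export default defineConfig({
  resolve: {
    alias: {
      // filenamify imports `node:path`, which has no browser build;
      // alias it to a polyfill until filenamify is replaced (see TODO above)
      'node:path': 'path-browserify',
    },
  },
  build: {
    rollupOptions: {
      onwarn(warning, warn) {
        // drop noisy sourcemap warnings from third-party dependencies
        if (warning.code === 'SOURCEMAP_ERROR' || warning.code === 'SOURCEMAP_BROKEN') {
          return;
        }
        warn(warning);
      },
    },
  },
});
```
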
* chore: /api/ changes to support `librechat-data-provider`

* refactor: rewrite Dockerfile.multi in light of /api/ changes to support `librechat-data-provider`

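Because /api/ now consumes `librechat-data-provider` as a workspace package, the multi-stage build has to compile it before the API image can run. A rough sketch of the resulting shape (stage names, base image, and paths are assumptions, not the commit's actual Dockerfile.multi):

```dockerfile
# build the shared package first
FROM node:20-alpine AS data-provider
WORKDIR /app
COPY packages/data-provider ./packages/data-provider
RUN cd packages/data-provider && npm install && npm run build

# the api stage reuses the built package
FROM node:20-alpine AS api
WORKDIR /app
COPY api ./api
COPY --from=data-provider /app/packages/data-provider ./packages/data-provider
RUN cd api && npm install
CMD ["node", "api/server/index.js"]
```
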
* chore: remove volume mappings to node_modules directories in the default compose file

* chore: remove schemas from /api/, as they are no longer needed with `librechat-data-provider`

* fix(ci): jest `librechat-data-provider/react-query` module resolution
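
Older Jest resolvers do not read the package.json `exports` field, so the subpath has to be mapped manually. A sketch of the fix (the dist path is an assumption):

```js
// jest.config.js (sketch)
module.exports = {
  moduleNameMapper: {
    // Jest versions that ignore "exports" need the subpath pointed at the built file
    '^librechat-data-provider/react-query$':
      '<rootDir>/../packages/data-provider/dist/react-query/index.js',
  },
};
```
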
Author: Danny Avila (committed by GitHub), 2023-12-11 14:48:40 -05:00
Parent: d4c846b543
Commit: df1dfa7d46
97 changed files with 1831 additions and 1343 deletions

@@ -1,6 +1,6 @@
 const Anthropic = require('@anthropic-ai/sdk');
 const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
-const { getResponseSender, EModelEndpoint } = require('~/server/services/Endpoints');
+const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
 const { getModelMaxTokens } = require('~/utils');
 const BaseClient = require('./BaseClient');

@@ -4,11 +4,7 @@ const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
 const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
 const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
 const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
-const {
-  getResponseSender,
-  EModelEndpoint,
-  endpointSettings,
-} = require('~/server/services/Endpoints');
+const { getResponseSender, EModelEndpoint, endpointSettings } = require('librechat-data-provider');
 const { getModelMaxTokens } = require('~/utils');
 const { formatMessage } = require('./prompts');
 const BaseClient = require('./BaseClient');

@@ -1,7 +1,7 @@
 const OpenAI = require('openai');
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
-const { getResponseSender, EModelEndpoint } = require('~/server/services/Endpoints');
+const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
 const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
 const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('~/utils');
 const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');

@@ -3,7 +3,7 @@ const { CallbackManager } = require('langchain/callbacks');
 const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
 const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
 const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
-const { EModelEndpoint } = require('~/server/services/Endpoints');
+const { EModelEndpoint } = require('librechat-data-provider');
 const { formatLangChainMessages } = require('./prompts');
 const checkBalance = require('~/models/checkBalance');
 const { SelfReflectionTool } = require('./tools');

@@ -1,5 +1,5 @@
 const { promptTokensEstimate } = require('openai-chat-tokens');
-const { EModelEndpoint } = require('~/server/services/Endpoints');
+const { EModelEndpoint } = require('librechat-data-provider');
 const { formatFromLangChain } = require('~/app/clients/prompts');
 const checkBalance = require('~/models/checkBalance');
 const { isEnabled } = require('~/server/utils');