🐳 feat: RAG for Default Docker Compose Files + Docs Update (#2246)

* refactor(deploy-compose.yml): use long-syntax to avoid implicit folder creation of librechat.yaml

* refactor(docker-compose.override.yml.example): use long-syntax to avoid implicit folder creation of librechat.yaml

* chore: add simple health check for RAG_API_URL (see the sketch after this list)

* chore: improve axios error handling, adding `logAxiosError`

* chore: more informative message detailing RAG_API_URL path

* feat: add rag_api and vectordb to default compose file

* chore(rag.yml): update standalone rag compose file to use RAG_PORT

* chore: documentation updates

* docs: Update rag_api.md with images

* Update rag_api.md

* Update rag_api.md, assistants clarification

* add RAG API note to breaking changes
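
The "simple health check for RAG_API_URL" referenced above is not part of the excerpted diff. The following is only a minimal sketch of how such a startup probe could be wired up with axios; the `checkRagHealth` name, the `/health` route, and the timeout value are illustrative assumptions, not the implementation added by this commit.

```js
const axios = require('axios');
const { logger } = require('~/config');

/**
 * Hypothetical startup probe for the RAG API.
 * Assumes the RAG service exposes a `/health` route; adjust if it differs.
 */
async function checkRagHealth() {
  const ragApiUrl = process.env.RAG_API_URL;
  if (!ragApiUrl) {
    logger.info('RAG_API_URL not set; skipping RAG API health check');
    return false;
  }

  try {
    const response = await axios.get(`${ragApiUrl}/health`, { timeout: 5000 });
    logger.info(`RAG API is reachable at ${ragApiUrl} (status ${response.status})`);
    return true;
  } catch (error) {
    // Spell out the exact URL being probed so a misconfigured RAG_API_URL path is obvious.
    logger.warn(`RAG API health check failed for ${ragApiUrl}/health: ${error.message}`);
    return false;
  }
}

module.exports = { checkRagHealth };
```
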
Author: Danny Avila
Date: 2024-03-29 21:15:36 -04:00 (committed by GitHub)
Parent: 6a6b2e79b0
Commit: 56ea0f9ae7
16 changed files with 306 additions and 74 deletions

@@ -1,11 +1,8 @@
 const axios = require('axios');
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
-const { extractBaseURL, inputSchema, processModelData } = require('~/utils');
+const { extractBaseURL, inputSchema, processModelData, logAxiosError } = require('~/utils');
 const getLogStores = require('~/cache/getLogStores');
 const { logger } = require('~/config');
 // const { getAzureCredentials, genAzureChatCompletion } = require('~/utils/');
 const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService').config;
@@ -77,29 +74,7 @@ const fetchModels = async ({
     models = input.data.map((item) => item.id);
   } catch (error) {
     const logMessage = `Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`;
-    if (error.response) {
-      logger.error(
-        `${logMessage} The request was made and the server responded with a status code that falls out of the range of 2xx: ${
-          error.message ? error.message : ''
-        }`,
-        {
-          headers: error.response.headers,
-          status: error.response.status,
-          data: error.response.data,
-        },
-      );
-    } else if (error.request) {
-      logger.error(
-        `${logMessage} The request was made but no response was received: ${
-          error.message ? error.message : ''
-        }`,
-        {
-          request: error.request,
-        },
-      );
-    } else {
-      logger.error(`${logMessage} Something happened in setting up the request`, error);
-    }
+    logAxiosError({ message: logMessage, error });
   }
   return models;
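
The `logAxiosError` helper imported above is not included in this excerpt. The sketch below simply folds the branching that the removed inline code performed (server responded with a non-2xx status, request sent but no response, failure while setting up the request) behind the call signature `logAxiosError({ message, error })` shown in the diff; treat it as an illustration of the helper's shape rather than the exact implementation added by the commit.

```js
const { logger } = require('~/config');

/**
 * Sketch of a shared axios error logger matching the call site used above.
 */
function logAxiosError({ message, error }) {
  if (error.response) {
    // The server replied, but with a status code outside the 2xx range.
    logger.error(
      `${message} The request was made and the server responded with a status code that falls out of the range of 2xx: ${
        error.message ? error.message : ''
      }`,
      {
        headers: error.response.headers,
        status: error.response.status,
        data: error.response.data,
      },
    );
  } else if (error.request) {
    // The request went out, but no response was ever received.
    logger.error(
      `${message} The request was made but no response was received: ${
        error.message ? error.message : ''
      }`,
      { request: error.request },
    );
  } else {
    // Something failed while setting up the request itself.
    logger.error(`${message} Something happened in setting up the request`, error);
  }
}

module.exports = { logAxiosError };
```

Centralizing this branching in one utility keeps each consumer's catch block down to a single call, as the updated `fetchModels` above shows.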