mirror of
https://github.com/danny-avila/LibreChat.git
synced 2025-12-16 16:30:15 +01:00
👓 feat: Vision Support for Assistants (#2195)
* refactor(assistants/chat): use promises to speed up initialization, initialize shared variables, include `attachedFileIds` to streamRunManager * chore: additional typedefs * fix(OpenAIClient): handle edge case where attachments promise is resolved * feat: createVisionPrompt * feat: Vision Support for Assistants
This commit is contained in:
parent
1f0fb497f8
commit
798e8763d0
16 changed files with 376 additions and 100 deletions
|
|
@ -468,21 +468,28 @@ async function checkMessageGaps({ openai, latestMessageId, thread_id, run_id, co
|
|||
|
||||
/**
|
||||
* Records token usage for a given completion request.
|
||||
*
|
||||
 * @param {Object} params - The parameters for recording token usage.
|
||||
* @param {number} params.prompt_tokens - The number of prompt tokens used.
|
||||
* @param {number} params.completion_tokens - The number of completion tokens used.
|
||||
* @param {string} params.model - The model used by the assistant run.
|
||||
* @param {string} params.user - The user's ID.
|
||||
* @param {string} params.conversationId - LibreChat conversation ID.
|
||||
* @param {string} [params.context='message'] - The context of the usage. Defaults to 'message'.
|
||||
 * @return {Promise<void>} A promise that resolves once the token usage has been recorded
|
||||
*/
|
||||
const recordUsage = async ({ prompt_tokens, completion_tokens, model, user, conversationId }) => {
|
||||
const recordUsage = async ({
|
||||
prompt_tokens,
|
||||
completion_tokens,
|
||||
model,
|
||||
user,
|
||||
conversationId,
|
||||
context = 'message',
|
||||
}) => {
|
||||
await spendTokens(
|
||||
{
|
||||
user,
|
||||
model,
|
||||
context: 'message',
|
||||
context,
|
||||
conversationId,
|
||||
},
|
||||
{ promptTokens: prompt_tokens, completionTokens: completion_tokens },
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue