🤖 feat: GPT-4.1 (#6880)

* fix: Agent Builder setting not applying in useSideNavLinks

* fix: Remove unused type imports in useSideNavLinks

* feat: gpt-4.1

* fix: Update getCacheMultiplier and getMultiplier tests to use dynamic token values

* feat: Add gpt-4.1 to the list of vision models

* chore: Bump version of librechat-data-provider to 0.7.792
Danny Avila 2025-04-14 14:55:59 -04:00 committed by GitHub
parent 64bd373bc8
commit 52b3ed54ca
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
9 changed files with 224 additions and 48 deletions


@@ -80,6 +80,9 @@ const tokenValues = Object.assign(
  'o1-mini': { prompt: 1.1, completion: 4.4 },
  'o1-preview': { prompt: 15, completion: 60 },
  o1: { prompt: 15, completion: 60 },
+ 'gpt-4.1-nano': { prompt: 0.1, completion: 0.4 },
+ 'gpt-4.1-mini': { prompt: 0.4, completion: 1.6 },
+ 'gpt-4.1': { prompt: 2, completion: 8 },
  'gpt-4.5': { prompt: 75, completion: 150 },
  'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
  'gpt-4o': { prompt: 2.5, completion: 10 },

@@ -183,6 +186,12 @@ const getValueKey = (model, endpoint) => {
    return 'o1';
  } else if (modelName.includes('gpt-4.5')) {
    return 'gpt-4.5';
+ } else if (modelName.includes('gpt-4.1-nano')) {
+   return 'gpt-4.1-nano';
+ } else if (modelName.includes('gpt-4.1-mini')) {
+   return 'gpt-4.1-mini';
+ } else if (modelName.includes('gpt-4.1')) {
+   return 'gpt-4.1';
  } else if (modelName.includes('gpt-4o-2024-05-13')) {
    return 'gpt-4o-2024-05-13';
  } else if (modelName.includes('gpt-4o-mini')) {
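
Note the branch order in getValueKey: 'gpt-4.1-nano' and 'gpt-4.1-mini' are checked before the bare 'gpt-4.1' substring so the more specific keys win. A minimal sketch of how the new entries resolve at pricing time, based on the getValueKey/getMultiplier calls exercised by the specs in this PR (the spend arithmetic and the per-million-token reading of the multipliers are illustrative assumptions, not part of this change):

// Sketch only: resolve a gpt-4.1 variant to its pricing key and rates.
const { getValueKey, getMultiplier } = require('./tx');

const key = getValueKey('openai/gpt-4.1-mini'); // 'gpt-4.1-mini'
const promptRate = getMultiplier({ valueKey: key, tokenType: 'prompt' }); // 0.4
const completionRate = getMultiplier({ valueKey: key, tokenType: 'completion' }); // 1.6

// Hypothetical spend estimate, assuming rates are USD per 1M tokens:
const estimate = (12_000 * promptRate + 3_000 * completionRate) / 1e6;
console.log(estimate); // 0.0096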


@@ -60,6 +60,30 @@ describe('getValueKey', () => {
    expect(getValueKey('gpt-4.5-0125')).toBe('gpt-4.5');
  });

+ it('should return "gpt-4.1" for model type of "gpt-4.1"', () => {
+   expect(getValueKey('gpt-4.1-preview')).toBe('gpt-4.1');
+   expect(getValueKey('gpt-4.1-2024-08-06')).toBe('gpt-4.1');
+   expect(getValueKey('gpt-4.1-2024-08-06-0718')).toBe('gpt-4.1');
+   expect(getValueKey('openai/gpt-4.1')).toBe('gpt-4.1');
+   expect(getValueKey('openai/gpt-4.1-2024-08-06')).toBe('gpt-4.1');
+   expect(getValueKey('gpt-4.1-turbo')).toBe('gpt-4.1');
+   expect(getValueKey('gpt-4.1-0125')).toBe('gpt-4.1');
+ });
+
+ it('should return "gpt-4.1-mini" for model type of "gpt-4.1-mini"', () => {
+   expect(getValueKey('gpt-4.1-mini-preview')).toBe('gpt-4.1-mini');
+   expect(getValueKey('gpt-4.1-mini-2024-08-06')).toBe('gpt-4.1-mini');
+   expect(getValueKey('openai/gpt-4.1-mini')).toBe('gpt-4.1-mini');
+   expect(getValueKey('gpt-4.1-mini-0125')).toBe('gpt-4.1-mini');
+ });
+
+ it('should return "gpt-4.1-nano" for model type of "gpt-4.1-nano"', () => {
+   expect(getValueKey('gpt-4.1-nano-preview')).toBe('gpt-4.1-nano');
+   expect(getValueKey('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
+   expect(getValueKey('openai/gpt-4.1-nano')).toBe('gpt-4.1-nano');
+   expect(getValueKey('gpt-4.1-nano-0125')).toBe('gpt-4.1-nano');
+ });
+
  it('should return "gpt-4o" for model type of "gpt-4o"', () => {
    expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
    expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');
@@ -185,6 +209,52 @@ describe('getMultiplier', () => {
    );
  });

+ it('should return the correct multiplier for gpt-4.1', () => {
+   const valueKey = getValueKey('gpt-4.1-2024-08-06');
+   expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4.1'].prompt);
+   expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
+     tokenValues['gpt-4.1'].completion,
+   );
+   expect(getMultiplier({ model: 'gpt-4.1-preview', tokenType: 'prompt' })).toBe(
+     tokenValues['gpt-4.1'].prompt,
+   );
+   expect(getMultiplier({ model: 'openai/gpt-4.1', tokenType: 'completion' })).toBe(
+     tokenValues['gpt-4.1'].completion,
+   );
+ });
+
+ it('should return the correct multiplier for gpt-4.1-mini', () => {
+   const valueKey = getValueKey('gpt-4.1-mini-2024-08-06');
+   expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
+     tokenValues['gpt-4.1-mini'].prompt,
+   );
+   expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
+     tokenValues['gpt-4.1-mini'].completion,
+   );
+   expect(getMultiplier({ model: 'gpt-4.1-mini-preview', tokenType: 'prompt' })).toBe(
+     tokenValues['gpt-4.1-mini'].prompt,
+   );
+   expect(getMultiplier({ model: 'openai/gpt-4.1-mini', tokenType: 'completion' })).toBe(
+     tokenValues['gpt-4.1-mini'].completion,
+   );
+ });
+
+ it('should return the correct multiplier for gpt-4.1-nano', () => {
+   const valueKey = getValueKey('gpt-4.1-nano-2024-08-06');
+   expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
+     tokenValues['gpt-4.1-nano'].prompt,
+   );
+   expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
+     tokenValues['gpt-4.1-nano'].completion,
+   );
+   expect(getMultiplier({ model: 'gpt-4.1-nano-preview', tokenType: 'prompt' })).toBe(
+     tokenValues['gpt-4.1-nano'].prompt,
+   );
+   expect(getMultiplier({ model: 'openai/gpt-4.1-nano', tokenType: 'completion' })).toBe(
+     tokenValues['gpt-4.1-nano'].completion,
+   );
+ });
+
  it('should return the correct multiplier for gpt-4o-mini', () => {
    const valueKey = getValueKey('gpt-4o-mini-2024-07-18');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
@@ -348,9 +418,11 @@ describe('getCacheMultiplier', () => {
  it('should derive the valueKey from the model if not provided', () => {
    expect(getCacheMultiplier({ cacheType: 'write', model: 'claude-3-5-sonnet-20240620' })).toBe(
-     3.75,
+     cacheTokenValues['claude-3-5-sonnet'].write,
+   );
+   expect(getCacheMultiplier({ cacheType: 'read', model: 'claude-3-haiku-20240307' })).toBe(
+     cacheTokenValues['claude-3-haiku'].read,
    );
-   expect(getCacheMultiplier({ cacheType: 'read', model: 'claude-3-haiku-20240307' })).toBe(0.03);
  });

  it('should return null if only model or cacheType is missing', () => {
@@ -371,10 +443,10 @@
    };
    expect(
      getCacheMultiplier({ model: 'custom-model', cacheType: 'write', endpointTokenConfig }),
-   ).toBe(5);
+   ).toBe(endpointTokenConfig['custom-model'].write);
    expect(
      getCacheMultiplier({ model: 'custom-model', cacheType: 'read', endpointTokenConfig }),
-   ).toBe(1);
+   ).toBe(endpointTokenConfig['custom-model'].read);
  });

  it('should return null if model is not found in endpointTokenConfig', () => {
@@ -395,13 +467,13 @@
        model: 'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0',
        cacheType: 'write',
      }),
-   ).toBe(3.75);
+   ).toBe(cacheTokenValues['claude-3-5-sonnet'].write);
    expect(
      getCacheMultiplier({
        model: 'bedrock/anthropic.claude-3-haiku-20240307-v1:0',
        cacheType: 'read',
      }),
-   ).toBe(0.03);
+   ).toBe(cacheTokenValues['claude-3-haiku'].read);
  });
});
@@ -488,46 +560,92 @@ describe('Grok Model Tests - Pricing', () => {
  test('should return correct prompt and completion rates for Grok vision models', () => {
    const models = ['grok-2-vision-1212', 'grok-2-vision', 'grok-2-vision-latest'];
    models.forEach((model) => {
-     expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
-     expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
+     expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(
+       tokenValues['grok-2-vision'].prompt,
+     );
+     expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
+       tokenValues['grok-2-vision'].completion,
+     );
    });
  });

  test('should return correct prompt and completion rates for Grok text models', () => {
    const models = ['grok-2-1212', 'grok-2', 'grok-2-latest'];
    models.forEach((model) => {
-     expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
-     expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
+     expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues['grok-2'].prompt);
+     expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
+       tokenValues['grok-2'].completion,
+     );
    });
  });

  test('should return correct prompt and completion rates for Grok beta models', () => {
-   expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe(5.0);
-   expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe(15.0);
-   expect(getMultiplier({ model: 'grok-beta', tokenType: 'prompt' })).toBe(5.0);
-   expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe(15.0);
+   expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-vision-beta'].prompt,
+   );
+   expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe(
+     tokenValues['grok-vision-beta'].completion,
+   );
+   expect(getMultiplier({ model: 'grok-beta', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-beta'].prompt,
+   );
+   expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe(
+     tokenValues['grok-beta'].completion,
+   );
  });

  test('should return correct prompt and completion rates for Grok 3 models', () => {
-   expect(getMultiplier({ model: 'grok-3', tokenType: 'prompt' })).toBe(3.0);
-   expect(getMultiplier({ model: 'grok-3', tokenType: 'completion' })).toBe(15.0);
-   expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'prompt' })).toBe(5.0);
-   expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'completion' })).toBe(25.0);
-   expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'prompt' })).toBe(0.3);
-   expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'completion' })).toBe(0.5);
-   expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'prompt' })).toBe(0.4);
-   expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'completion' })).toBe(4.0);
+   expect(getMultiplier({ model: 'grok-3', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3'].prompt,
+   );
+   expect(getMultiplier({ model: 'grok-3', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3'].completion,
+   );
+   expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3-fast'].prompt,
+   );
+   expect(getMultiplier({ model: 'grok-3-fast', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3-fast'].completion,
+   );
+   expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3-mini'].prompt,
+   );
+   expect(getMultiplier({ model: 'grok-3-mini', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3-mini'].completion,
+   );
+   expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3-mini-fast'].prompt,
+   );
+   expect(getMultiplier({ model: 'grok-3-mini-fast', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3-mini-fast'].completion,
+   );
  });

  test('should return correct prompt and completion rates for Grok 3 models with prefixes', () => {
-   expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'prompt' })).toBe(3.0);
-   expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'completion' })).toBe(15.0);
-   expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'prompt' })).toBe(5.0);
-   expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'completion' })).toBe(25.0);
-   expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'prompt' })).toBe(0.3);
-   expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'completion' })).toBe(0.5);
-   expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'prompt' })).toBe(0.4);
-   expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'completion' })).toBe(4.0);
+   expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3'].prompt,
+   );
+   expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3'].completion,
+   );
+   expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3-fast'].prompt,
+   );
+   expect(getMultiplier({ model: 'xai/grok-3-fast', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3-fast'].completion,
+   );
+   expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3-mini'].prompt,
+   );
+   expect(getMultiplier({ model: 'xai/grok-3-mini', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3-mini'].completion,
+   );
+   expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'prompt' })).toBe(
+     tokenValues['grok-3-mini-fast'].prompt,
+   );
+   expect(getMultiplier({ model: 'xai/grok-3-mini-fast', tokenType: 'completion' })).toBe(
+     tokenValues['grok-3-mini-fast'].completion,
+   );
  });
});
});


@@ -14,6 +14,9 @@ const openAIModels = {
  'gpt-4-1106': 127500, // -500 from max
  'gpt-4-0125': 127500, // -500 from max
  'gpt-4.5': 127500, // -500 from max
+ 'gpt-4.1': 1047576,
+ 'gpt-4.1-mini': 1047576,
+ 'gpt-4.1-nano': 1047576,
  'gpt-4o': 127500, // -500 from max
  'gpt-4o-mini': 127500, // -500 from max
  'gpt-4o-2024-05-13': 127500, // -500 from max
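
All three GPT-4.1 variants are registered with the 1,047,576-token context window. A quick sketch of how these limits are expected to surface through the utilities exercised by the specs below (the require path is an assumption; the return values come from the entries and tests in this PR):

// Sketch only; getModelMaxTokens and matchModelName are the utilities tested below.
const { getModelMaxTokens, matchModelName } = require('./tokens');

matchModelName('openai/gpt-4.1-nano');   // 'gpt-4.1-nano'
getModelMaxTokens('gpt-4.1-2024-08-06'); // 1047576, via the 'gpt-4.1' entry
getModelMaxTokens('gpt-4o');             // 127500, unchanged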


@@ -113,6 +113,43 @@ describe('getModelMaxTokens', () => {
    );
  });

+ test('should return correct tokens for gpt-4.1 matches', () => {
+   expect(getModelMaxTokens('gpt-4.1')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4.1']);
+   expect(getModelMaxTokens('gpt-4.1-preview')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'],
+   );
+   expect(getModelMaxTokens('openai/gpt-4.1')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'],
+   );
+   expect(getModelMaxTokens('gpt-4.1-2024-08-06')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1'],
+   );
+ });
+
+ test('should return correct tokens for gpt-4.1-mini matches', () => {
+   expect(getModelMaxTokens('gpt-4.1-mini')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'],
+   );
+   expect(getModelMaxTokens('gpt-4.1-mini-preview')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'],
+   );
+   expect(getModelMaxTokens('openai/gpt-4.1-mini')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-mini'],
+   );
+ });
+
+ test('should return correct tokens for gpt-4.1-nano matches', () => {
+   expect(getModelMaxTokens('gpt-4.1-nano')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'],
+   );
+   expect(getModelMaxTokens('gpt-4.1-nano-preview')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'],
+   );
+   expect(getModelMaxTokens('openai/gpt-4.1-nano')).toBe(
+     maxTokensMap[EModelEndpoint.openAI]['gpt-4.1-nano'],
+   );
+ });
+
  test('should return correct tokens for Anthropic models', () => {
    const models = [
      'claude-2.1',
@@ -355,6 +392,25 @@ describe('matchModelName', () => {
    expect(matchModelName('gpt-4-0125-vision-preview')).toBe('gpt-4-0125');
  });

+ it('should return the closest matching key for gpt-4.1 matches', () => {
+   expect(matchModelName('openai/gpt-4.1')).toBe('gpt-4.1');
+   expect(matchModelName('gpt-4.1-preview')).toBe('gpt-4.1');
+   expect(matchModelName('gpt-4.1-2024-08-06')).toBe('gpt-4.1');
+   expect(matchModelName('gpt-4.1-2024-08-06-0718')).toBe('gpt-4.1');
+ });
+
+ it('should return the closest matching key for gpt-4.1-mini matches', () => {
+   expect(matchModelName('openai/gpt-4.1-mini')).toBe('gpt-4.1-mini');
+   expect(matchModelName('gpt-4.1-mini-preview')).toBe('gpt-4.1-mini');
+   expect(matchModelName('gpt-4.1-mini-2024-08-06')).toBe('gpt-4.1-mini');
+ });
+
+ it('should return the closest matching key for gpt-4.1-nano matches', () => {
+   expect(matchModelName('openai/gpt-4.1-nano')).toBe('gpt-4.1-nano');
+   expect(matchModelName('gpt-4.1-nano-preview')).toBe('gpt-4.1-nano');
+   expect(matchModelName('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
+ });
+
  // Tests for Google models
  it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
    expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');


@@ -62,8 +62,6 @@ const SidePanel = ({
    () => getEndpointField(endpointsConfig, endpoint, 'type'),
    [endpoint, endpointsConfig],
  );
- const assistants = useMemo(() => endpointsConfig?.[endpoint ?? ''], [endpoint, endpointsConfig]);
- const agents = useMemo(() => endpointsConfig?.[endpoint ?? ''], [endpoint, endpointsConfig]);
  const userProvidesKey = useMemo(
    () => !!(endpointsConfig?.[endpoint ?? '']?.userProvide ?? false),

@@ -84,10 +82,8 @@
  }, []);

  const Links = useSideNavLinks({
-   agents,
    endpoint,
    hidePanel,
-   assistants,
    keyProvided,
    endpointType,
    interfaceConfig,


@@ -8,7 +8,7 @@ import {
  EModelEndpoint,
  Permissions,
} from 'librechat-data-provider';
-import type { TConfig, TInterfaceConfig, TEndpointsConfig } from 'librechat-data-provider';
+import type { TInterfaceConfig, TEndpointsConfig } from 'librechat-data-provider';
import type { NavLink } from '~/common';
import AgentPanelSwitch from '~/components/SidePanel/Agents/AgentPanelSwitch';
import BookmarkPanel from '~/components/SidePanel/Bookmarks/BookmarkPanel';

@@ -21,8 +21,6 @@ import { useHasAccess } from '~/hooks';
export default function useSideNavLinks({
  hidePanel,
- assistants,
- agents,
  keyProvided,
  endpoint,
  endpointType,

@@ -30,8 +28,6 @@
  endpointsConfig,
}: {
  hidePanel: () => void;
- assistants?: TConfig | null;
- agents?: TConfig | null;
  keyProvided: boolean;
  endpoint?: EModelEndpoint | null;
  endpointType?: EModelEndpoint | null;

@@ -59,8 +55,8 @@
    const links: NavLink[] = [];
    if (
      isAssistantsEndpoint(endpoint) &&
-     assistants &&
-     assistants.disableBuilder !== true &&
+     endpointsConfig?.[EModelEndpoint.assistants] &&
+     endpointsConfig[EModelEndpoint.assistants].disableBuilder !== true &&
      keyProvided
    ) {
      links.push({

@@ -76,8 +72,7 @@
      endpointsConfig?.[EModelEndpoint.agents] &&
      hasAccessToAgents &&
      hasAccessToCreateAgents &&
-     agents &&
-     agents.disableBuilder !== true
+     endpointsConfig[EModelEndpoint.agents].disableBuilder !== true
    ) {
      links.push({
        title: 'com_sidepanel_agent_builder',

@@ -141,13 +136,11 @@
    return links;
  }, [
-   endpointsConfig?.[EModelEndpoint.agents],
+   endpointsConfig,
    interfaceConfig.parameters,
    keyProvided,
-   assistants,
    endpointType,
    endpoint,
-   agents,
    hasAccessToAgents,
    hasAccessToPrompts,
    hasAccessToBookmarks,
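
This is the fix named in the commit message: SidePanel previously derived both assistants and agents from whichever endpoint was currently selected (endpointsConfig?.[endpoint ?? '']), which meant the agents endpoint's disableBuilder flag was not consulted unless that endpoint happened to be the active one. The hook now reads the assistants and agents entries from endpointsConfig directly. An illustrative fragment of the config shape the new conditions read (the real entries in librechat-data-provider carry more fields; only the one used here is shown, and the string keys assume EModelEndpoint.agents / EModelEndpoint.assistants resolve to 'agents' / 'assistants'):

// Illustrative endpointsConfig fragment, keyed per EModelEndpoint.
const endpointsConfig = {
  agents: { disableBuilder: true },      // Agent Builder link stays hidden
  assistants: { disableBuilder: false }, // Assistant Builder link may render
};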

package-lock.json (generated, 2 changes)

@@ -42957,7 +42957,7 @@
    },
    "packages/data-provider": {
      "name": "librechat-data-provider",
-     "version": "0.7.791",
+     "version": "0.7.792",
      "license": "ISC",
      "dependencies": {
        "axios": "^1.8.2",


@@ -1,6 +1,6 @@
{
  "name": "librechat-data-provider",
- "version": "0.7.791",
+ "version": "0.7.792",
  "description": "data services for librechat apps",
  "main": "dist/index.js",
  "module": "dist/index.es.js",


@@ -857,6 +857,7 @@ export const visionModels = [
  'gpt-4-turbo',
  'gpt-4-vision',
  'o1',
+ 'gpt-4.1',
  'gpt-4.5',
  'llava',
  'llava-13b',
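
Adding the bare 'gpt-4.1' entry marks the whole family as vision-capable. Assuming vision support is detected by substring-matching model names against this list (which a single entry covering the -mini and -nano variants suggests), a minimal sketch of such a check:

// Hypothetical helper for illustration; not necessarily the data provider's actual API.
const isVisionModel = (model, visionModels) =>
  visionModels.some((visionModel) => model.includes(visionModel));

isVisionModel('openai/gpt-4.1-mini', visionModels); // true once 'gpt-4.1' is listed
isVisionModel('gpt-3.5-turbo', visionModels);       // false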