🧩 fix: Missing Memory Agent Assignment for Matching IDs (#11514)

* fix: `useMemory` in AgentClient for PrelimAgent Assignment

* Updated the `useMemory` method in AgentClient to resolve the preliminary memory agent (`prelimAgent`) from the memory configuration, covering the previously missing case where the configured memory agent ID matches the current agent's ID (a sketch of the resulting selection flow follows this list).
* Added an early return when `prelimAgent` cannot be resolved, so memory processing is skipped cleanly instead of proceeding with an undefined agent.
* Introduced comprehensive unit tests validating the main memory configurations: matching and differing agent IDs, ephemeral agents built from model + provider, and the early-return path.
* Mocked the necessary dependencies in tests to keep the new functionality isolated and deterministic.
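For reference, a minimal self-contained paraphrase of the selection flow described above. This is a sketch, not the shipped method: the real logic lives inside `AgentClient.useMemory` and calls `loadAgent`/`initializeAgent`; the `resolvePrelimAgent` function, its parameter shapes, and the `'ephemeral'` placeholder for `Constants.EPHEMERAL_AGENT_ID` are illustrative assumptions.

```ts
interface MemoryAgentConfig {
  id?: string;
  model?: string;
  provider?: string;
}

interface ResolvedAgent {
  id: string;
  model?: string;
  provider?: string;
}

// Assumed placeholder; the real code uses Constants.EPHEMERAL_AGENT_ID.
const EPHEMERAL_AGENT_ID = 'ephemeral';

async function resolvePrelimAgent(
  memoryAgent: MemoryAgentConfig | undefined,
  currentAgent: ResolvedAgent,
  loadAgentById: (id: string) => Promise<ResolvedAgent>,
): Promise<ResolvedAgent | undefined> {
  if (memoryAgent?.id != null && memoryAgent.id !== currentAgent.id) {
    // A dedicated memory agent is configured: load it by ID.
    return loadAgentById(memoryAgent.id);
  }
  if (memoryAgent?.id != null) {
    // The fix: the configured memory agent ID matches the current agent,
    // so reuse the already-loaded agent instead of falling through.
    return currentAgent;
  }
  if (memoryAgent?.model != null && memoryAgent.provider != null) {
    // No ID, but model + provider given: build an ephemeral agent.
    return { id: EPHEMERAL_AGENT_ID, model: memoryAgent.model, provider: memoryAgent.provider };
  }
  // No resolvable memory agent: the caller returns early (the new guard).
  return undefined;
}
```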

* fix: Update temperature handling for Bedrock and Anthropic providers in memory management (see the sketch after this commit list)

* fix: Replace hardcoded provider strings with constants in memory agent tests

* fix: Replace hardcoded provider string with constant in allowedProviders for AgentClient

* fix: memory agent tests to use actual Providers and GraphEvents constants
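
The temperature handling referenced above appears in the last file's diff below. As a standalone sketch of the rule: the string values `'bedrock'` and `'openai'` match the `Providers` mock shown later in this diff, while `'anthropic'` is an assumed value for `Providers.ANTHROPIC`; the exact Bedrock condition is cut off by a hunk boundary, so the check here is approximated from the tests.

```ts
type ThinkingLLMConfig = {
  provider: string;
  temperature?: number;
  thinking?: { type?: string };
  additionalModelRequestFields?: { thinking?: unknown };
};

// Paraphrase of the patched rules for "extended thinking" models:
// - Bedrock with thinking enabled: temperature must be 1.
// - Anthropic with thinking enabled: temperature must be removed.
function applyThinkingTemperatureRules(config: ThinkingLLMConfig): ThinkingLLMConfig {
  const adjusted = { ...config };
  if (config.provider === 'bedrock' && config.additionalModelRequestFields?.thinking != null) {
    adjusted.temperature = 1;
  }
  if (
    config.provider === 'anthropic' &&
    config.thinking?.type === 'enabled' &&
    config.temperature != null
  ) {
    delete adjusted.temperature;
  }
  return adjusted;
}
```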
Danny Avila, 2026-01-25 12:08:52 -05:00 (committed by GitHub)
parent 6a49861861 · commit 0b4deac953
4 changed files with 378 additions and 20 deletions


@@ -616,6 +616,8 @@ class AgentClient extends BaseClient {
agent_id: memoryConfig.agent.id,
endpoint: EModelEndpoint.agents,
});
} else if (memoryConfig.agent?.id != null) {
prelimAgent = this.options.agent;
} else if (
memoryConfig.agent?.id == null &&
memoryConfig.agent?.model != null &&
@@ -630,6 +632,10 @@
);
}
if (!prelimAgent) {
return;
}
const agent = await initializeAgent(
{
req: this.options.req,


@@ -12,6 +12,17 @@ jest.mock('@librechat/agents', () => ({
jest.mock('@librechat/api', () => ({
...jest.requireActual('@librechat/api'),
checkAccess: jest.fn(),
initializeAgent: jest.fn(),
createMemoryProcessor: jest.fn(),
}));
jest.mock('~/models/Agent', () => ({
loadAgent: jest.fn(),
}));
jest.mock('~/models/Role', () => ({
getRoleByName: jest.fn(),
}));
// Mock getMCPManager
@@ -2070,4 +2081,179 @@ describe('AgentClient - titleConvo', () => {
expect(client.options.agent.instructions).toContain(memoryContent);
});
});
describe('useMemory method - prelimAgent assignment', () => {
let client;
let mockReq;
let mockRes;
let mockAgent;
let mockOptions;
let mockCheckAccess;
let mockLoadAgent;
let mockInitializeAgent;
let mockCreateMemoryProcessor;
beforeEach(() => {
jest.clearAllMocks();
mockAgent = {
id: 'agent-123',
endpoint: EModelEndpoint.openAI,
provider: EModelEndpoint.openAI,
instructions: 'Test instructions',
model: 'gpt-4',
model_parameters: {
model: 'gpt-4',
},
};
mockReq = {
user: {
id: 'user-123',
personalization: {
memories: true,
},
},
config: {
memory: {
agent: {
id: 'agent-123',
},
},
endpoints: {
[EModelEndpoint.agents]: {
allowedProviders: [EModelEndpoint.openAI],
},
},
},
};
mockRes = {};
mockOptions = {
req: mockReq,
res: mockRes,
agent: mockAgent,
};
mockCheckAccess = require('@librechat/api').checkAccess;
mockLoadAgent = require('~/models/Agent').loadAgent;
mockInitializeAgent = require('@librechat/api').initializeAgent;
mockCreateMemoryProcessor = require('@librechat/api').createMemoryProcessor;
});
it('should use current agent when memory config agent.id matches current agent id', async () => {
mockCheckAccess.mockResolvedValue(true);
mockInitializeAgent.mockResolvedValue({
...mockAgent,
provider: EModelEndpoint.openAI,
});
mockCreateMemoryProcessor.mockResolvedValue([undefined, jest.fn()]);
client = new AgentClient(mockOptions);
client.conversationId = 'convo-123';
client.responseMessageId = 'response-123';
await client.useMemory();
expect(mockLoadAgent).not.toHaveBeenCalled();
expect(mockInitializeAgent).toHaveBeenCalledWith(
expect.objectContaining({
agent: mockAgent,
}),
expect.any(Object),
);
});
it('should load different agent when memory config agent.id differs from current agent id', async () => {
const differentAgentId = 'different-agent-456';
const differentAgent = {
id: differentAgentId,
provider: EModelEndpoint.openAI,
model: 'gpt-4',
instructions: 'Different agent instructions',
};
mockReq.config.memory.agent.id = differentAgentId;
mockCheckAccess.mockResolvedValue(true);
mockLoadAgent.mockResolvedValue(differentAgent);
mockInitializeAgent.mockResolvedValue({
...differentAgent,
provider: EModelEndpoint.openAI,
});
mockCreateMemoryProcessor.mockResolvedValue([undefined, jest.fn()]);
client = new AgentClient(mockOptions);
client.conversationId = 'convo-123';
client.responseMessageId = 'response-123';
await client.useMemory();
expect(mockLoadAgent).toHaveBeenCalledWith(
expect.objectContaining({
agent_id: differentAgentId,
}),
);
expect(mockInitializeAgent).toHaveBeenCalledWith(
expect.objectContaining({
agent: differentAgent,
}),
expect.any(Object),
);
});
it('should return early when prelimAgent is undefined (no valid memory agent config)', async () => {
mockReq.config.memory = {
agent: {},
};
mockCheckAccess.mockResolvedValue(true);
client = new AgentClient(mockOptions);
client.conversationId = 'convo-123';
client.responseMessageId = 'response-123';
const result = await client.useMemory();
expect(result).toBeUndefined();
expect(mockInitializeAgent).not.toHaveBeenCalled();
expect(mockCreateMemoryProcessor).not.toHaveBeenCalled();
});
it('should create ephemeral agent when no id but model and provider are specified', async () => {
mockReq.config.memory = {
agent: {
model: 'gpt-4',
provider: EModelEndpoint.openAI,
},
};
mockCheckAccess.mockResolvedValue(true);
mockInitializeAgent.mockResolvedValue({
id: Constants.EPHEMERAL_AGENT_ID,
model: 'gpt-4',
provider: EModelEndpoint.openAI,
});
mockCreateMemoryProcessor.mockResolvedValue([undefined, jest.fn()]);
client = new AgentClient(mockOptions);
client.conversationId = 'convo-123';
client.responseMessageId = 'response-123';
await client.useMemory();
expect(mockLoadAgent).not.toHaveBeenCalled();
expect(mockInitializeAgent).toHaveBeenCalledWith(
expect.objectContaining({
agent: expect.objectContaining({
id: Constants.EPHEMERAL_AGENT_ID,
model: 'gpt-4',
provider: EModelEndpoint.openAI,
}),
}),
expect.any(Object),
);
});
});
});


@@ -1,5 +1,5 @@
import { Types } from 'mongoose';
-import { Run } from '@librechat/agents';
+import { Run, Providers } from '@librechat/agents';
import type { IUser } from '@librechat/data-schemas';
import type { Response } from 'express';
import { processMemory } from './memory';
@@ -37,20 +37,18 @@ jest.mock('~/utils', () => ({
const { createSafeUser } = jest.requireMock('~/utils');
-jest.mock('@librechat/agents', () => ({
-  Run: {
-    create: jest.fn(() => ({
-      processStream: jest.fn(() => Promise.resolve('success')),
-    })),
-  },
-  Providers: {
-    OPENAI: 'openai',
-    BEDROCK: 'bedrock',
-  },
-  GraphEvents: {
-    TOOL_END: 'tool_end',
-  },
-}));
+jest.mock('@librechat/agents', () => {
+  const actual = jest.requireActual('@librechat/agents');
+  return {
+    Run: {
+      create: jest.fn(() => ({
+        processStream: jest.fn(() => Promise.resolve('success')),
+      })),
+    },
+    Providers: actual.Providers,
+    GraphEvents: actual.GraphEvents,
+  };
+});
function createTestUser(overrides: Partial<IUser> = {}): IUser {
return {
@@ -255,7 +253,7 @@ describe('Memory Agent Header Resolution', () => {
it('should not throw when llmConfig has no configuration', async () => {
const llmConfig = {
-provider: 'openai',
+provider: Providers.OPENAI,
model: 'gpt-4o-mini',
};
@@ -288,7 +286,7 @@
} as unknown as Partial<IUser>);
const llmConfig = {
-provider: 'openai',
+provider: Providers.OPENAI,
model: 'gpt-4o-mini',
configuration: {
defaultHeaders: {
@@ -324,7 +322,7 @@
it('should include instructions in user message for Bedrock provider', async () => {
const llmConfig = {
-provider: 'bedrock',
+provider: Providers.BEDROCK,
model: 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
};
@@ -356,7 +354,7 @@
it('should pass instructions to graphConfig for non-Bedrock providers', async () => {
const llmConfig = {
-provider: 'openai',
+provider: Providers.OPENAI,
model: 'gpt-4o-mini',
};
@@ -382,4 +380,161 @@
expect(runConfig.graphConfig.instructions).toBe('test instructions');
expect(runConfig.graphConfig.additional_instructions).toBeDefined();
});
it('should set temperature to 1 for Bedrock with thinking enabled', async () => {
const llmConfig = {
provider: Providers.BEDROCK,
model: 'us.anthropic.claude-sonnet-4-20250514-v1:0',
temperature: 0.7,
additionalModelRequestFields: {
thinking: {
type: 'enabled',
budget_tokens: 5000,
},
},
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(1);
});
it('should not modify temperature for Bedrock without thinking enabled', async () => {
const llmConfig = {
provider: Providers.BEDROCK,
model: 'us.anthropic.claude-haiku-4-5-20251001-v1:0',
temperature: 0.7,
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(0.7);
});
it('should remove temperature for Anthropic with thinking enabled', async () => {
const llmConfig = {
provider: Providers.ANTHROPIC,
model: 'claude-sonnet-4-20250514',
temperature: 0.7,
thinking: {
type: 'enabled',
budget_tokens: 5000,
},
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBeUndefined();
expect(runConfig.graphConfig.llmConfig.thinking).toEqual({
type: 'enabled',
budget_tokens: 5000,
});
});
it('should not modify temperature for Anthropic without thinking enabled', async () => {
const llmConfig = {
provider: Providers.ANTHROPIC,
model: 'claude-sonnet-4-20250514',
temperature: 0.7,
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(0.7);
});
it('should not modify temperature for Anthropic with thinking type not enabled', async () => {
const llmConfig = {
provider: Providers.ANTHROPIC,
model: 'claude-sonnet-4-20250514',
temperature: 0.7,
thinking: {
type: 'disabled',
},
};
await processMemory({
res: mockRes,
userId: 'user-123',
setMemory: mockMemoryMethods.setMemory,
deleteMemory: mockMemoryMethods.deleteMemory,
messages: [],
memory: 'existing memory',
messageId: 'msg-123',
conversationId: 'conv-123',
validKeys: ['preferences'],
instructions: 'test instructions',
llmConfig,
user: testUser,
});
expect(Run.create as jest.Mock).toHaveBeenCalled();
const runConfig = (Run.create as jest.Mock).mock.calls[0][0];
expect(runConfig.graphConfig.llmConfig.temperature).toBe(0.7);
});
});


@@ -369,7 +369,6 @@ ${memory ?? 'No existing memories'}`;
}
}
// Handle Bedrock with thinking enabled - temperature must be 1
const bedrockConfig = finalLLMConfig as {
additionalModelRequestFields?: { thinking?: unknown };
temperature?: number;
@@ -382,6 +381,18 @@
(finalLLMConfig as unknown as Record<string, unknown>).temperature = 1;
}
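// Handle Anthropic with thinking enabled - temperature must be removed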
const anthropicConfig = finalLLMConfig as {
thinking?: { type?: string };
temperature?: number;
};
if (
llmConfig?.provider === Providers.ANTHROPIC &&
anthropicConfig.thinking?.type === 'enabled' &&
anthropicConfig.temperature != null
) {
delete (finalLLMConfig as Record<string, unknown>).temperature;
}
const llmConfigWithHeaders = finalLLMConfig as OpenAIClientOptions;
if (llmConfigWithHeaders?.configuration?.defaultHeaders != null) {
llmConfigWithHeaders.configuration.defaultHeaders = resolveHeaders({