🚀 feat: o1 Tool Calling & reasoning_effort (#5553)

* fix: Update @librechat/agents to version 1.9.98

* feat: o1 tool calling

* fix: Improve error logging in RouteErrorBoundary

* refactor: Move extractContent function to utils and clean up Artifact component

* refactor: optimize reasoning UI post-streaming and deprecate plugins rendering

* feat: reasoning_effort support

* fix: update request content type handling in openapiToFunction to remove default 'application/x-www-form-urlencoded'

* chore: bump v0.7.696 data-provider
This commit is contained in:
Danny Avila 2025-01-30 12:36:35 -05:00 committed by GitHub
parent 591a019766
commit 587d46a20b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
17 changed files with 752 additions and 685 deletions

View file

@@ -578,6 +578,7 @@ export default {
com_endpoint_top_k: 'Top K',
com_endpoint_max_output_tokens: 'Max Output Tokens',
com_endpoint_stop: 'Stop Sequences',
com_endpoint_reasoning_effort: 'Reasoning Effort',
com_endpoint_stop_placeholder: 'Separate values by pressing `Enter`',
com_endpoint_openai_max_tokens: `Optional \`max_tokens\` field, representing the maximum number of tokens that can be generated in the chat completion.
@@ -596,6 +597,8 @@ export default {
'Resend all previously attached images. Note: this can significantly increase token cost and you may experience errors with many image attachments.',
com_endpoint_openai_resend_files:
'Resend all previously attached files. Note: this will increase token cost and you may experience errors with many attachments.',
com_endpoint_openai_reasoning_effort:
'o1 models only: constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.',
com_endpoint_openai_detail:
'The resolution for Vision requests. "Low" is cheaper and faster, "High" is more detailed and expensive, and "Auto" will automatically choose between the two based on the image resolution.',
com_endpoint_openai_stop: 'Up to 4 sequences where the API will stop generating further tokens.',