Mirror of https://github.com/danny-avila/LibreChat.git (synced 2025-12-16 16:30:15 +01:00)
🌍 i18n: Update translation.json with latest translations (#8505)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
parent dfef7c31d2
commit ac94c73f23

1 changed file with 2 additions and 2 deletions
@@ -198,6 +198,8 @@
 "com_endpoint_deprecated": "Deprecated",
 "com_endpoint_deprecated_info": "This endpoint is deprecated and may be removed in future versions, please use the agent endpoint instead",
 "com_endpoint_deprecated_info_a11y": "The plugin endpoint is deprecated and may be removed in future versions, please use the agent endpoint instead",
+"com_endpoint_disable_streaming": "Disable streaming responses and receive the complete response at once. Useful for models like o3 that require organization verification for streaming",
+"com_endpoint_disable_streaming_label": "Disable Streaming",
 "com_endpoint_examples": " Presets",
 "com_endpoint_export": "Export",
 "com_endpoint_export_share": "Export/Share",
@@ -231,7 +233,6 @@
 "com_endpoint_openai_reasoning_summary": "Responses API only: A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. Set to none,auto, concise, or detailed.",
 "com_endpoint_openai_resend": "Resend all previously attached images. Note: this can significantly increase token cost and you may experience errors with many image attachments.",
 "com_endpoint_openai_resend_files": "Resend all previously attached files. Note: this will increase token cost and you may experience errors with many attachments.",
-"com_endpoint_disable_streaming": "Disable streaming responses and receive the complete response at once. Useful for models like o3 that require organization verification for streaming",
 "com_endpoint_openai_stop": "Up to 4 sequences where the API will stop generating further tokens.",
 "com_endpoint_openai_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
 "com_endpoint_openai_topp": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.",
@@ -240,7 +241,6 @@
 "com_endpoint_output": "Output",
 "com_endpoint_plug_image_detail": "Image Detail",
 "com_endpoint_plug_resend_files": "Resend Files",
-"com_endpoint_disable_streaming_label": "Disable Streaming",
 "com_endpoint_plug_set_custom_instructions_for_gpt_placeholder": "Set custom instructions to include in System Message. Default: none",
 "com_endpoint_plug_skip_completion": "Skip Completion",
 "com_endpoint_plug_use_functions": "Use Functions",
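Since the two added lines and the two removed lines carry the same keys, the net effect of this diff is a reordering of entries in translation.json; lookup behavior is unchanged. A minimal sketch with plain i18next (an assumption for illustration, not necessarily LibreChat's own localization wiring) showing that key resolution does not depend on position in the file:

// sketch.ts - hypothetical example, requires `npm install i18next`
import i18next from "i18next";

// Two of the keys touched by this commit, inlined for the sketch;
// in the real app they come from the locale's translation.json.
const translation = {
  com_endpoint_disable_streaming_label: "Disable Streaming",
  com_endpoint_disable_streaming:
    "Disable streaming responses and receive the complete response at once. Useful for models like o3 that require organization verification for streaming",
};

async function main() {
  // Register the strings as the default "translation" namespace for English.
  await i18next.init({ lng: "en", resources: { en: { translation } } });

  // JSON key order is irrelevant at lookup time, which is why the entries
  // can be re-sorted without changing what the UI renders.
  console.log(i18next.t("com_endpoint_disable_streaming_label")); // "Disable Streaming"
}

main();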