🪙 feat: Configure Max Context and Output Tokens (#2648)

* chore: make frequent 'error' log into 'debug' log

* feat: add maxContextTokens as a conversation field

* refactor(settings): increase popover height

* feat: add DynamicInputNumber and maxContextTokens to all endpoints that support it (frontend), fix schema

* feat: maxContextTokens handling (backend)

* style: revert popover height

* feat: max tokens

* fix: Ollama Vision firebase compatibility

* fix: Ollama Vision, use message_file_map to determine multimodal request

* refactor: bring back MobileNav and improve title styling
Danny Avila 2024-05-09 13:27:13 -04:00 committed by GitHub
parent 5293b73b6d
commit 6ba7f60eec
26 changed files with 420 additions and 22 deletions
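Per the commit messages, maxContextTokens is introduced as an optional numeric conversation field that the frontend collects (via a new DynamicInputNumber control) and every supporting endpoint forwards to the backend. A minimal sketch of the kind of schema field this implies, assuming a zod-based conversation schema; the surrounding fields and schema name are illustrative, not the exact edit made in this PR:

```js
// Illustrative sketch only – the surrounding conversation schema is an assumption.
const { z } = require('zod');

const conversationSchema = z.object({
  // ...existing conversation fields...
  model: z.string().nullable().optional(),
  promptPrefix: z.string().nullable().optional(),
  /** Optional per-conversation cap on the model's context window, in tokens. */
  maxContextTokens: z.coerce.number().optional(),
});
```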


@@ -1,5 +1,14 @@
 const buildOptions = (endpoint, parsedBody) => {
-  const { modelLabel, promptPrefix, resendFiles, iconURL, greeting, spec, ...rest } = parsedBody;
+  const {
+    modelLabel,
+    promptPrefix,
+    maxContextTokens,
+    resendFiles,
+    iconURL,
+    greeting,
+    spec,
+    ...rest
+  } = parsedBody;
   const endpointOption = {
     endpoint,
     modelLabel,
@@ -8,6 +17,7 @@ const buildOptions = (endpoint, parsedBody) => {
     iconURL,
     greeting,
     spec,
+    maxContextTokens,
     modelOptions: {
       ...rest,
     },
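The same destructure-and-forward pattern repeats in each endpoint's buildOptions below: maxContextTokens is pulled out of the request body so it rides on endpointOption rather than modelOptions. A minimal sketch of how a backend client might then honor the override; the getModelMaxTokens helper and the 4096 fallback are assumptions for illustration, not code from this PR:

```js
// Illustrative only: prefer the user-supplied cap, then the known model limit,
// then a conservative default.
const MODEL_MAX_TOKENS = { 'gpt-4o': 128000, 'claude-3-opus-20240229': 200000 };
const getModelMaxTokens = (model) => MODEL_MAX_TOKENS[model];

class ExampleClient {
  constructor(endpointOption = {}) {
    const { maxContextTokens, modelOptions = {} } = endpointOption;
    this.maxContextTokens = maxContextTokens ?? getModelMaxTokens(modelOptions.model) ?? 4096;
  }
}

// new ExampleClient({ maxContextTokens: 8000, modelOptions: { model: 'gpt-4o' } })
//   .maxContextTokens === 8000
```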


@@ -1,6 +1,15 @@
 const buildOptions = (endpoint, parsedBody, endpointType) => {
-  const { chatGptLabel, promptPrefix, resendFiles, imageDetail, iconURL, greeting, spec, ...rest } =
-    parsedBody;
+  const {
+    chatGptLabel,
+    promptPrefix,
+    maxContextTokens,
+    resendFiles,
+    imageDetail,
+    iconURL,
+    greeting,
+    spec,
+    ...rest
+  } = parsedBody;
   const endpointOption = {
     endpoint,
     endpointType,
@@ -11,6 +20,7 @@ const buildOptions = (endpoint, parsedBody, endpointType) => {
     iconURL,
     greeting,
     spec,
+    maxContextTokens,
     modelOptions: {
       ...rest,
     },


@@ -7,6 +7,7 @@ const buildOptions = (endpoint, parsedBody) => {
     iconURL,
     greeting,
     spec,
+    maxContextTokens,
     ...modelOptions
   } = parsedBody;
   const endpointOption = {
@@ -21,6 +22,7 @@ const buildOptions = (endpoint, parsedBody) => {
     iconURL,
     greeting,
     spec,
+    maxContextTokens,
     modelOptions,
   };


@@ -1,6 +1,15 @@
 const buildOptions = (endpoint, parsedBody) => {
-  const { chatGptLabel, promptPrefix, resendFiles, imageDetail, iconURL, greeting, spec, ...rest } =
-    parsedBody;
+  const {
+    chatGptLabel,
+    promptPrefix,
+    maxContextTokens,
+    resendFiles,
+    imageDetail,
+    iconURL,
+    greeting,
+    spec,
+    ...rest
+  } = parsedBody;
   const endpointOption = {
     endpoint,
     chatGptLabel,
@@ -10,6 +19,7 @@ const buildOptions = (endpoint, parsedBody) => {
     iconURL,
     greeting,
     spec,
+    maxContextTokens,
     modelOptions: {
       ...rest,
     },


@@ -23,7 +23,7 @@ async function fetchImageToBase64(url) {
   }
 }
 
-const base64Only = new Set([EModelEndpoint.google, EModelEndpoint.anthropic]);
+const base64Only = new Set([EModelEndpoint.google, EModelEndpoint.anthropic, 'Ollama', 'ollama']);
 
 /**
  * Encodes and formats the given files.
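With 'Ollama' and 'ollama' added to base64Only, image attachments for Ollama vision requests are inlined as base64 instead of being passed by URL, which is what makes remotely stored files (e.g. Firebase) usable. A minimal sketch of how such an allowlist is typically consulted when formatting an image; the function name, file shape, and payload format here are assumptions, not the file's actual implementation:

```js
// Illustrative only: gate between inline base64 data and a stored-file URL.
function formatImagePayload(endpoint, file, base64Data) {
  if (base64Only.has(endpoint)) {
    // Endpoints that cannot fetch remote URLs receive the image inlined as a data URL.
    return { type: 'image_url', image_url: { url: `data:${file.type};base64,${base64Data}` } };
  }
  // Other endpoints can be handed a (possibly signed) URL to the stored file.
  return { type: 'image_url', image_url: { url: file.filepath } };
}
```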