diff --git a/app/appearance/langs/en_US.json b/app/appearance/langs/en_US.json
index dbce9d042..186779fad 100644
--- a/app/appearance/langs/en_US.json
+++ b/app/appearance/langs/en_US.json
@@ -297,6 +297,8 @@
   "apiMaxTokensTip": "The max_tokens parameter passed in when requesting the API is used to control the length of the generated text",
   "apiTemperature": "Temperature",
   "apiTemperatureTip": "The temperature parameter passed in when requesting the API is used to control the randomness of the generated text",
+  "apiMaxContexts": "Maximum number of contexts",
+  "apiMaxContextsTip": "The maximum number of contexts passed in when requesting the API",
   "apiBaseURL": "API Base URL",
   "apiBaseURLTip": "The base address of the request, such as https://api.openai.com/v1",
   "apiUserAgentTip": "The user agent that initiated the request, that is, the HTTP header User-Agent",
diff --git a/app/appearance/langs/es_ES.json b/app/appearance/langs/es_ES.json
index 6e785f2af..be488258e 100644
--- a/app/appearance/langs/es_ES.json
+++ b/app/appearance/langs/es_ES.json
@@ -297,6 +297,8 @@
   "apiMaxTokensTip": "El parámetro max_tokens que se pasa al solicitar la API se usa para controlar la longitud del texto generado",
   "apiTemperature": "Temperatura",
   "apiTemperatureTip": "El parámetro temperature pasado al solicitar la API se utiliza para controlar la aleatoriedad del texto generado",
+  "apiMaxContexts": "Número máximo de contextos",
+  "apiMaxContextsTip": "El número máximo de contextos pasados al solicitar la API",
   "apiBaseURL": "URL base de la API",
   "apiBaseURLTip": "La dirección base de la solicitud, como https://api.openai.com/v1",
   "apiUserAgentTip": "El agente de usuario que inició la solicitud, es decir, el encabezado HTTP User-Agent",
diff --git a/app/appearance/langs/fr_FR.json b/app/appearance/langs/fr_FR.json
index c809674c0..58a7db0cc 100644
--- a/app/appearance/langs/fr_FR.json
+++ b/app/appearance/langs/fr_FR.json
@@ -297,6 +297,8 @@
   "apiMaxTokensTip": "Le paramètre max_tokens transmis lors de la demande de l'API est utilisé pour contrôler la longueur du texte généré",
   "apiTemperature": "Température",
   "apiTemperatureTip": "Le paramètre temperature transmis lors de la requête à l'API est utilisé pour contrôler le caractère aléatoire du texte généré",
+  "apiMaxContexts": "Nombre maximum de contextes",
+  "apiMaxContextsTip": "Le nombre maximum de contextes transmis lors de la requête de l'API",
   "apiBaseURL": "URL de base de l'API",
   "apiBaseURLTip": "L'adresse de base de la requête, telle que https://api.openai.com/v1",
   "apiUserAgentTip": "L'agent utilisateur qui a initié la requête, c'est-à-dire l'en-tête HTTP User-Agent",
diff --git a/app/appearance/langs/zh_CHT.json b/app/appearance/langs/zh_CHT.json
index 266615518..49ecd5032 100644
--- a/app/appearance/langs/zh_CHT.json
+++ b/app/appearance/langs/zh_CHT.json
@@ -297,6 +297,8 @@
   "apiMaxTokensTip": "請求 API 時傳入的 max_tokens 參數,用於控制生成的文字長度",
   "apiTemperature": "溫度",
   "apiTemperatureTip": "請求 API 時傳入的 temperature 參數,用來控制產生的文字隨機性",
+  "apiMaxContexts": "最大上下文數",
+  "apiMaxContextsTip": "請求 API 時傳入的最大上下文數",
   "apiBaseURL": "API 基礎地址",
   "apiBaseURLTip": "發起請求的基礎地址,如 https://api.openai.com/v1",
   "apiUserAgentTip": "發起請求的使用者代理,即 HTTP 標頭 User-Agent",
diff --git a/app/appearance/langs/zh_CN.json b/app/appearance/langs/zh_CN.json
index b7967d706..137d64808 100644
--- a/app/appearance/langs/zh_CN.json
+++ b/app/appearance/langs/zh_CN.json
@@ -297,6 +297,8 @@
   "apiMaxTokensTip": "请求 API 时传入的 max_tokens 参数,用于控制生成的文本长度",
   "apiTemperature": "温度",
   "apiTemperatureTip": "请求 API 时传入的 temperature 参数,用于控制生成的文本随机性",
+  "apiMaxContexts": "最大上下文数",
+  "apiMaxContextsTip": "请求 API 时传入的最大上下文数",
   "apiBaseURL": "API 基础地址",
   "apiBaseURLTip": "发起请求的基础地址,如 https://api.openai.com/v1",
   "apiVersion": "API 版本",
diff --git a/app/src/config/ai.ts b/app/src/config/ai.ts
index 47f642e8d..9285613fb 100644
--- a/app/src/config/ai.ts
+++ b/app/src/config/ai.ts
@@ -35,6 +35,12 @@ export const ai = {
         <input class="b3-text-field fn__block" id="apiTemperature" type="number" value="${window.siyuan.config.ai.openAI.apiTemperature}">
         <div class="b3-label__text">${window.siyuan.languages.apiTemperatureTip}</div>
     </div>
+    <div class="b3-label">
+        ${window.siyuan.languages.apiMaxContexts}
+        <div class="fn__hr"></div>
+        <input class="b3-text-field fn__block" id="apiMaxContexts" type="number" value="${window.siyuan.config.ai.openAI.apiMaxContexts}">
+        <div class="b3-label__text">${window.siyuan.languages.apiMaxContextsTip}</div>
+    </div>
     <div class="b3-label">
         ${window.siyuan.languages.apiModel}
         <div class="fn__hr"></div>
@@ -110,6 +116,14 @@ export const ai = {
     <span class="fn__space"></span>
     <input class="b3-text-field fn__flex-center fn__size200" id="apiTemperature" type="number" value="${window.siyuan.config.ai.openAI.apiTemperature}">
 </label>
+<label class="fn__flex b3-label">
+    <div class="fn__flex-1">
+        ${window.siyuan.languages.apiMaxContexts}
+        <div class="b3-label__text">${window.siyuan.languages.apiMaxContextsTip}</div>
+    </div>
+    <span class="fn__space"></span>
+    <input class="b3-text-field fn__flex-center fn__size200" id="apiMaxContexts" type="number" value="${window.siyuan.config.ai.openAI.apiMaxContexts}">
+</label>
 <label class="fn__flex b3-label">
     <div class="fn__flex-1">
         ${window.siyuan.languages.apiModel}
@@ -191,6 +205,7 @@ export const ai = {
             apiModel: (ai.element.querySelector("#apiModel") as HTMLSelectElement).value,
             apiMaxTokens: parseInt((ai.element.querySelector("#apiMaxTokens") as HTMLInputElement).value),
             apiTemperature: parseFloat((ai.element.querySelector("#apiTemperature") as HTMLInputElement).value),
+            apiMaxContexts: parseInt((ai.element.querySelector("#apiMaxContexts") as HTMLInputElement).value),
             apiProxy: (ai.element.querySelector("#apiProxy") as HTMLInputElement).value,
             apiTimeout: parseInt((ai.element.querySelector("#apiTimeout") as HTMLInputElement).value),
             apiProvider: (ai.element.querySelector("#apiProvider") as HTMLSelectElement).value,
diff --git a/app/src/types/index.d.ts b/app/src/types/index.d.ts
index 0f059fc79..3412b3b8b 100644
--- a/app/src/types/index.d.ts
+++ b/app/src/types/index.d.ts
@@ -739,6 +739,7 @@ interface IConfig {
         apiModel: string
         apiMaxTokens: number
         apiTemperature: number
+        apiMaxContexts: number
         apiProxy: string
         apiTimeout: number
     },
diff --git a/kernel/api/setting.go b/kernel/api/setting.go
index 754e814dc..ce251892d 100644
--- a/kernel/api/setting.go
+++ b/kernel/api/setting.go
@@ -198,6 +198,10 @@ func setAI(c *gin.Context) {
        ai.OpenAI.APITemperature = 1.0
    }
 
+   if 1 > ai.OpenAI.APIMaxContexts || 64 < ai.OpenAI.APIMaxContexts {
+       ai.OpenAI.APIMaxContexts = 7
+   }
+
    model.Conf.AI = ai
    model.Conf.Save()
diff --git a/kernel/conf/ai.go b/kernel/conf/ai.go
index 2c154ef1d..e8b5bc4bb 100644
--- a/kernel/conf/ai.go
+++ b/kernel/conf/ai.go
@@ -35,6 +35,7 @@ type OpenAI struct {
    APIModel       string  `json:"apiModel"`
    APIMaxTokens   int     `json:"apiMaxTokens"`
    APITemperature float64 `json:"apiTemperature"`
+   APIMaxContexts int     `json:"apiMaxContexts"`
    APIBaseURL     string  `json:"apiBaseURL"`
    APIUserAgent   string  `json:"apiUserAgent"`
    APIProvider    string  `json:"apiProvider"` // OpenAI, Azure
@@ -43,11 +44,13 @@
 
 func NewAI() *AI {
    openAI := &OpenAI{
-       APITimeout:   30,
-       APIModel:     openai.GPT3Dot5Turbo,
-       APIBaseURL:   "https://api.openai.com/v1",
-       APIUserAgent: util.UserAgent,
-       APIProvider:  "OpenAI",
+       APITemperature: 1.0,
+       APIMaxContexts: 7,
+       APITimeout:     30,
+       APIModel:       openai.GPT3Dot5Turbo,
+       APIBaseURL:     "https://api.openai.com/v1",
+       APIUserAgent:   util.UserAgent,
+       APIProvider:    "OpenAI",
    }
 
    openAI.APIKey = os.Getenv("SIYUAN_OPENAI_API_KEY")
@@ -77,6 +80,13 @@ func NewAI() *AI {
        }
    }
 
+   if maxContexts := os.Getenv("SIYUAN_OPENAI_API_MAX_CONTEXTS"); "" != maxContexts {
+       maxContextsInt, err := strconv.Atoi(maxContexts)
+       if nil == err {
+           openAI.APIMaxContexts = maxContextsInt
+       }
+   }
+
    if baseURL := os.Getenv("SIYUAN_OPENAI_API_BASE_URL"); "" != baseURL {
        openAI.APIBaseURL = baseURL
    }
diff --git a/kernel/model/ai.go b/kernel/model/ai.go
index 9a0c3e64a..a64348000 100644
--- a/kernel/model/ai.go
+++ b/kernel/model/ai.go
@@ -84,8 +84,8 @@ func chatGPTContinueWrite(msg string, contextMsgs []string, cloud bool) (ret str
    util.PushEndlessProgress("Requesting...")
    defer util.ClearPushProgress(100)
 
-   if 7 < len(contextMsgs) {
-       contextMsgs = contextMsgs[len(contextMsgs)-7:]
+   if Conf.AI.OpenAI.APIMaxContexts < len(contextMsgs) {
+       contextMsgs = contextMsgs[len(contextMsgs)-Conf.AI.OpenAI.APIMaxContexts:]
    }
 
    var gpt GPT
@@ -96,7 +96,7 @@ func chatGPTContinueWrite(msg string, contextMsgs []string, cloud bool) (ret str
    }
 
    buf := &bytes.Buffer{}
-   for i := 0; i < 7; i++ {
+   for i := 0; i < Conf.AI.OpenAI.APIMaxContexts; i++ {
        part, stop, chatErr := gpt.chat(msg, contextMsgs)
        buf.WriteString(part)
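
For reference, the kernel/model/ai.go change above replaces the hard-coded limit of 7 with the configurable Conf.AI.OpenAI.APIMaxContexts: before each request, only the most recent APIMaxContexts context messages are kept. A minimal standalone sketch of that trimming rule (trimContexts is an illustrative helper, not part of this patch):

    package main

    import "fmt"

    // trimContexts mirrors the check added in chatGPTContinueWrite: when the
    // history is longer than the configured limit, the oldest messages are
    // dropped so that only the newest max entries are sent with the request.
    func trimContexts(contextMsgs []string, max int) []string {
        if max < len(contextMsgs) {
            return contextMsgs[len(contextMsgs)-max:]
        }
        return contextMsgs
    }

    func main() {
        history := []string{"m1", "m2", "m3", "m4", "m5"}
        fmt.Println(trimContexts(history, 3)) // prints [m3 m4 m5]
    }
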
diff --git a/kernel/model/conf.go b/kernel/model/conf.go
index 329245a80..c2cac4b80 100644
--- a/kernel/model/conf.go
+++ b/kernel/model/conf.go
@@ -420,6 +420,9 @@ func InitConf() {
    if 0 >= Conf.AI.OpenAI.APITemperature || 2 < Conf.AI.OpenAI.APITemperature {
        Conf.AI.OpenAI.APITemperature = 1.0
    }
+   if 1 > Conf.AI.OpenAI.APIMaxContexts || 64 < Conf.AI.OpenAI.APIMaxContexts {
+       Conf.AI.OpenAI.APIMaxContexts = 7
+   }
 
    if "" != Conf.AI.OpenAI.APIKey {
        logging.LogInfof("OpenAI API enabled\n"+
@@ -429,14 +432,16 @@ func InitConf() {
            "    proxy=%s\n"+
            "    model=%s\n"+
            "    maxTokens=%d\n"+
-           "    temperature=%.1f",
+           "    temperature=%.1f\n"+
+           "    maxContexts=%d",
            Conf.AI.OpenAI.APIUserAgent, Conf.AI.OpenAI.APIBaseURL, Conf.AI.OpenAI.APITimeout, Conf.AI.OpenAI.APIProxy,
            Conf.AI.OpenAI.APIModel, Conf.AI.OpenAI.APIMaxTokens,
-           Conf.AI.OpenAI.APITemperature)
+           Conf.AI.OpenAI.APITemperature,
+           Conf.AI.OpenAI.APIMaxContexts)
    }
 
    Conf.ReadOnly = util.ReadOnly
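
Both kernel/api/setting.go and kernel/model/conf.go validate the new field with the same rule: any value outside 1..64 falls back to the default of 7, which matches the previous hard-coded behavior. A minimal sketch of that clamp (normalizeMaxContexts is an illustrative name, not part of this patch):

    package main

    import "fmt"

    // normalizeMaxContexts applies the same bounds used in setAI and InitConf:
    // values below 1 or above 64 are reset to the default of 7.
    func normalizeMaxContexts(v int) int {
        if 1 > v || 64 < v {
            return 7
        }
        return v
    }

    func main() {
        for _, v := range []int{0, 7, 64, 100} {
            fmt.Printf("%d -> %d\n", v, normalizeMaxContexts(v)) // 0 -> 7, 7 -> 7, 64 -> 64, 100 -> 7
        }
    }

When a fresh configuration is created, kernel/conf/ai.go also reads an initial value from the SIYUAN_OPENAI_API_MAX_CONTEXTS environment variable, following the pattern of the existing SIYUAN_OPENAI_API_* overrides.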