🎨 AI supports configuration randomness and context number https://github.com/siyuan-note/siyuan/issues/10660

Daniel 2024-03-20 11:45:22 +08:00
parent 41b4984de3
commit 5300638622
11 changed files with 55 additions and 10 deletions

View file

@@ -297,6 +297,8 @@
 "apiMaxTokensTip": "The <code class='fn__code'>max_tokens</code> parameter passed in when requesting the API is used to control the length of the generated text",
 "apiTemperature": "Temperature",
 "apiTemperatureTip": "The <code class='fn__code'>temperature</code> parameter passed in when requesting the API is used to control the randomness of the generated text",
+"apiMaxContexts": "Maximum number of contexts",
+"apiMaxContextsTip": "The maximum number of contexts passed in when requesting the API",
 "apiBaseURL": "API Base URL",
 "apiBaseURLTip": "The base address of the request, such as <code class='fn__code'>https://api.openai.com/v1</code>",
 "apiUserAgentTip": "The user agent that initiated the request, that is, the HTTP header <code class='fn__code'>User-Agent</code>",

View file

@@ -297,6 +297,8 @@
 "apiMaxTokensTip": "El parámetro <code class='fn__code'>max_tokens</code> que se pasa al solicitar la API se usa para controlar la longitud del texto generado",
 "apiTemperature": "Temperatura",
 "apiTemperatureTip": "El parámetro <code class='fn__code'>temperature</code> pasado al solicitar la API se utiliza para controlar la aleatoriedad del texto generado",
+"apiMaxContexts": "Número máximo de contextos",
+"apiMaxContextsTip": "El número máximo de contextos pasados al solicitar la API",
 "apiBaseURL": "URL base de la API",
 "apiBaseURLTip": "La dirección base de la solicitud, como <code class='fn__code'>https://api.openai.com/v1</code>",
 "apiUserAgentTip": "El agente de usuario que inició la solicitud, es decir, el encabezado HTTP <code class='fn__code'>User-Agent</code>",

View file

@@ -297,6 +297,8 @@
 "apiMaxTokensTip": "Le paramètre <code class='fn__code'>max_tokens</code> transmis lors de la demande de l'API est utilisé pour contrôler la longueur du texte généré",
 "apiTemperature": "Température",
 "apiTemperatureTip": "Le paramètre <code class='fn__code'>temperature</code> transmis lors de la requête à l'API est utilisé pour contrôler le caractère aléatoire du texte généré",
+"apiMaxContexts": "Nombre maximum de contextes",
+"apiMaxContextsTip": "Le nombre maximum de contextes transmis lors de la requête de l'API",
 "apiBaseURL": "URL de base de l'API",
 "apiBaseURLTip": "L'adresse de base de la requête, telle que <code class='fn__code'>https://api.openai.com/v1</code>",
 "apiUserAgentTip": "L'agent utilisateur qui a initié la requête, c'est-à-dire l'en-tête HTTP <code class='fn__code'>User-Agent</code>",

View file

@@ -297,6 +297,8 @@
 "apiMaxTokensTip": "請求 API 時傳入的 <code class='fn__code'>max_tokens</code> 參數,用於控制生成的文字長度",
 "apiTemperature": "溫度",
 "apiTemperatureTip": "請求 API 時傳入的 <code class='fn__code'>temperature</code> 參數,用來控制產生的文字隨機性",
+"apiMaxContexts": "最大上下文數",
+"apiMaxContextsTip": "請求 API 時傳入的最大上下文數",
 "apiBaseURL": "API 基礎地址",
 "apiBaseURLTip": "發起請求的基礎地址,如 <code class='fn__code'>https://api.openai.com/v1</code>",
 "apiUserAgentTip": "發起請求的使用者代理,即 HTTP 標頭 <code class='fn__code'>User-Agent</code>",

View file

@@ -297,6 +297,8 @@
 "apiMaxTokensTip": "请求 API 时传入的 <code class='fn__code'>max_tokens</code> 参数,用于控制生成的文本长度",
 "apiTemperature": "温度",
 "apiTemperatureTip": "请求 API 时传入的 <code class='fn__code'>temperature</code> 参数,用于控制生成的文本随机性",
+"apiMaxContexts": "最大上下文数",
+"apiMaxContextsTip": "请求 API 时传入的最大上下文数",
 "apiBaseURL": "API 基础地址",
 "apiBaseURLTip": "发起请求的基础地址,如 <code class='fn__code'>https://api.openai.com/v1</code>",
 "apiVersion": "API 版本",

View file

@@ -35,6 +35,12 @@ export const ai = {
 <input class="b3-text-field fn__flex-center fn__block" type="number" step="0.1" min="0" max="2" id="apiTemperature" value="${window.siyuan.config.ai.openAI.apiTemperature}"/>
 <div class="b3-label__text">${window.siyuan.languages.apiTemperatureTip}</div>
 </div>
+<div class="b3-label">
+${window.siyuan.languages.apiMaxContexts}
+<div class="fn__hr"></div>
+<input class="b3-text-field fn__flex-center fn__block" type="number" step="1" min="1" max="64" id="apiMaxContexts" value="${window.siyuan.config.ai.openAI.apiMaxContexts}"/>
+<div class="b3-label__text">${window.siyuan.languages.apiMaxContextsTip}</div>
+</div>
 <div class="b3-label">
 ${window.siyuan.languages.apiModel}
 <div class="fn__hr"></div>
@@ -110,6 +116,14 @@ export const ai = {
 <span class="fn__space"></span>
 <input class="b3-text-field fn__flex-center fn__size200" type="number" step="0.1" min="0" max="2" id="apiTemperature" value="${window.siyuan.config.ai.openAI.apiTemperature}"/>
 </div>
+<div class="fn__flex b3-label">
+<div class="fn__flex-1">
+${window.siyuan.languages.apiMaxContexts}
+<div class="b3-label__text">${window.siyuan.languages.apiMaxContextsTip}</div>
+</div>
+<span class="fn__space"></span>
+<input class="b3-text-field fn__flex-center fn__size200" type="number" step="1" min="1" max="64" id="apiMaxContexts" value="${window.siyuan.config.ai.openAI.apiMaxContexts}"/>
+</div>
 <div class="fn__flex b3-label">
 <div class="fn__block">
 ${window.siyuan.languages.apiModel}
@@ -191,6 +205,7 @@ export const ai = {
 apiModel: (ai.element.querySelector("#apiModel") as HTMLSelectElement).value,
 apiMaxTokens: parseInt((ai.element.querySelector("#apiMaxTokens") as HTMLInputElement).value),
 apiTemperature: parseFloat((ai.element.querySelector("#apiTemperature") as HTMLInputElement).value),
+apiMaxContexts: parseInt((ai.element.querySelector("#apiMaxContexts") as HTMLInputElement).value),
 apiProxy: (ai.element.querySelector("#apiProxy") as HTMLInputElement).value,
 apiTimeout: parseInt((ai.element.querySelector("#apiTimeout") as HTMLInputElement).value),
 apiProvider: (ai.element.querySelector("#apiProvider") as HTMLSelectElement).value,

View file

@@ -739,6 +739,7 @@ interface IConfig {
 apiModel: string
 apiMaxTokens: number
 apiTemperature: number
+apiMaxContexts: number
 apiProxy: string
 apiTimeout: number
 },

View file

@@ -198,6 +198,10 @@ func setAI(c *gin.Context) {
         ai.OpenAI.APITemperature = 1.0
     }
+    if 1 > ai.OpenAI.APIMaxContexts || 64 < ai.OpenAI.APIMaxContexts {
+        ai.OpenAI.APIMaxContexts = 7
+    }
     model.Conf.AI = ai
     model.Conf.Save()
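
The range check above mirrors the one added to InitConf further down: any value outside 1-64 falls back to 7. A minimal standalone sketch of that rule, assuming nothing beyond this hunk (the helper name is illustrative and not part of the codebase):

package main

import "fmt"

// normalizeMaxContexts applies the same validation as the hunk above:
// values outside [1, 64] fall back to the default of 7.
func normalizeMaxContexts(v int) int {
	if 1 > v || 64 < v {
		return 7
	}
	return v
}

func main() {
	fmt.Println(normalizeMaxContexts(0))   // 7: too small, reset to default
	fmt.Println(normalizeMaxContexts(10))  // 10: within range, kept
	fmt.Println(normalizeMaxContexts(100)) // 7: too large, reset to default
}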

View file

@@ -35,6 +35,7 @@ type OpenAI struct {
     APIModel       string  `json:"apiModel"`
     APIMaxTokens   int     `json:"apiMaxTokens"`
     APITemperature float64 `json:"apiTemperature"`
+    APIMaxContexts int     `json:"apiMaxContexts"`
     APIBaseURL     string  `json:"apiBaseURL"`
     APIUserAgent   string  `json:"apiUserAgent"`
     APIProvider    string  `json:"apiProvider"` // OpenAI, Azure
@@ -43,11 +44,13 @@ type OpenAI struct {
 func NewAI() *AI {
     openAI := &OpenAI{
-        APITimeout:   30,
-        APIModel:     openai.GPT3Dot5Turbo,
-        APIBaseURL:   "https://api.openai.com/v1",
-        APIUserAgent: util.UserAgent,
-        APIProvider:  "OpenAI",
+        APITemperature: 1.0,
+        APIMaxContexts: 7,
+        APITimeout:     30,
+        APIModel:       openai.GPT3Dot5Turbo,
+        APIBaseURL:     "https://api.openai.com/v1",
+        APIUserAgent:   util.UserAgent,
+        APIProvider:    "OpenAI",
     }
     openAI.APIKey = os.Getenv("SIYUAN_OPENAI_API_KEY")
@@ -77,6 +80,13 @@ func NewAI() *AI {
         }
     }
+    if maxContexts := os.Getenv("SIYUAN_OPENAI_API_MAX_CONTEXTS"); "" != maxContexts {
+        maxContextsInt, err := strconv.Atoi(maxContexts)
+        if nil == err {
+            openAI.APIMaxContexts = maxContextsInt
+        }
+    }
     if baseURL := os.Getenv("SIYUAN_OPENAI_API_BASE_URL"); "" != baseURL {
         openAI.APIBaseURL = baseURL
     }
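
Combined with the constructor hunk above, the effective value resolves in two steps: the default of 7 set in NewAI, then the SIYUAN_OPENAI_API_MAX_CONTEXTS environment variable if it parses as an integer; out-of-range values are later reset to 7 in InitConf. A condensed sketch of that order under those assumptions (the helper name is illustrative and does not exist in the codebase):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// resolveMaxContexts condenses the resolution order shown in this file:
// start from the default of 7 (set in NewAI), then let the environment
// variable override it when it parses as an integer.
func resolveMaxContexts() int {
	maxContexts := 7
	if v := os.Getenv("SIYUAN_OPENAI_API_MAX_CONTEXTS"); "" != v {
		if parsed, err := strconv.Atoi(v); nil == err {
			maxContexts = parsed
		}
	}
	return maxContexts
}

func main() {
	os.Setenv("SIYUAN_OPENAI_API_MAX_CONTEXTS", "12")
	fmt.Println(resolveMaxContexts()) // 12
}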

View file

@@ -84,8 +84,8 @@ func chatGPTContinueWrite(msg string, contextMsgs []string, cloud bool) (ret str
     util.PushEndlessProgress("Requesting...")
     defer util.ClearPushProgress(100)
-    if 7 < len(contextMsgs) {
-        contextMsgs = contextMsgs[len(contextMsgs)-7:]
+    if Conf.AI.OpenAI.APIMaxContexts < len(contextMsgs) {
+        contextMsgs = contextMsgs[len(contextMsgs)-Conf.AI.OpenAI.APIMaxContexts:]
     }
     var gpt GPT
@@ -96,7 +96,7 @@ func chatGPTContinueWrite(msg string, contextMsgs []string, cloud bool) (ret str
     }
     buf := &bytes.Buffer{}
-    for i := 0; i < 7; i++ {
+    for i := 0; i < Conf.AI.OpenAI.APIMaxContexts; i++ {
         part, stop, chatErr := gpt.chat(msg, contextMsgs)
         buf.WriteString(part)
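
With these two changes the previously hardcoded limit of 7 becomes configurable: the context slice is trimmed to the most recent APIMaxContexts messages, and the loop that accumulates responses into buf runs at most that many times. A minimal, self-contained sketch of the trimming step (function and variable names are illustrative):

package main

import "fmt"

// trimContexts keeps only the most recent maxContexts entries, matching the
// check above that replaced the hardcoded limit of 7.
func trimContexts(contextMsgs []string, maxContexts int) []string {
	if maxContexts < len(contextMsgs) {
		return contextMsgs[len(contextMsgs)-maxContexts:]
	}
	return contextMsgs
}

func main() {
	msgs := []string{"m1", "m2", "m3", "m4", "m5", "m6", "m7", "m8", "m9"}
	// With the default of 7, only the last seven messages remain as context.
	fmt.Println(trimContexts(msgs, 7)) // [m3 m4 m5 m6 m7 m8 m9]
}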

View file

@@ -420,6 +420,9 @@ func InitConf() {
     if 0 >= Conf.AI.OpenAI.APITemperature || 2 < Conf.AI.OpenAI.APITemperature {
         Conf.AI.OpenAI.APITemperature = 1.0
     }
+    if 1 > Conf.AI.OpenAI.APIMaxContexts || 64 < Conf.AI.OpenAI.APIMaxContexts {
+        Conf.AI.OpenAI.APIMaxContexts = 7
+    }
     if "" != Conf.AI.OpenAI.APIKey {
         logging.LogInfof("OpenAI API enabled\n"+
@@ -429,14 +432,16 @@
             " proxy=%s\n"+
             " model=%s\n"+
             " maxTokens=%d\n"+
-            " temperature=%.1f",
+            " temperature=%.1f\n"+
+            " maxContexts=%d",
             Conf.AI.OpenAI.APIUserAgent,
             Conf.AI.OpenAI.APIBaseURL,
             Conf.AI.OpenAI.APITimeout,
             Conf.AI.OpenAI.APIProxy,
             Conf.AI.OpenAI.APIModel,
             Conf.AI.OpenAI.APIMaxTokens,
-            Conf.AI.OpenAI.APITemperature)
+            Conf.AI.OpenAI.APITemperature,
+            Conf.AI.OpenAI.APIMaxContexts)
     }
     Conf.ReadOnly = util.ReadOnly