From a9ccb0605c3ce9d4ca534e10e0f79f6eb251d1c7 Mon Sep 17 00:00:00 2001 From: Peter Pan Date: Fri, 15 Sep 2023 01:36:03 -0400 Subject: [PATCH] feat: OpenAI - explicit value for MaxToken and Temp When k8sgpt talks with vLLM, the default MaxToken is 16, which is too small. Given that most models support 2048 tokens (like Llama 1, etc.), 2048 is used here as a safe value. Signed-off-by: Peter Pan --- pkg/ai/openai.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/ai/openai.go b/pkg/ai/openai.go index 7d9e6797af..c37a8b1506 100644 --- a/pkg/ai/openai.go +++ b/pkg/ai/openai.go @@ -34,6 +34,15 @@ type OpenAIClient struct { model string } +const ( + // OpenAI completion parameters + maxToken = 2048 + temperature = 0.7 + presencePenalty = 0.0 + frequencyPenalty = 0.0 + topP = 1.0 +) + func (c *OpenAIClient) Configure(config IAIConfig, language string) error { token := config.GetPassword() defaultConfig := openai.DefaultConfig(token) @@ -66,6 +75,11 @@ func (c *OpenAIClient) GetCompletion(ctx context.Context, prompt string, promptT Content: fmt.Sprintf(promptTmpl, c.language, prompt), }, }, + MaxTokens: maxToken, + Temperature: temperature, + PresencePenalty: presencePenalty, + FrequencyPenalty: frequencyPenalty, + TopP: topP, }) if err != nil { return "", err