-
Notifications
You must be signed in to change notification settings - Fork 4
/
completion.go
143 lines (123 loc) · 3.24 KB
/
completion.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
package main
import (
"context"
"log"
"time"
ai "github.com/sashabaranov/go-openai"
)
// LLM abstracts a chat-completion backend. Both methods deliver their
// results over the returned channel; ChatCompletionStreamTask streams
// incrementally, while ChatCompletionTask performs a non-streaming
// completion (still surfaced through the same channel type so callers
// can consume both uniformly).
type LLM interface {
	ChatCompletionTask(context.Context, *CompletionRequest) (<-chan StreamResponse, error)
	ChatCompletionStreamTask(context.Context, *CompletionRequest) (<-chan StreamResponse, error)
}
// CompletionRequest carries everything an LLM implementation needs to
// perform one chat completion: API credentials/endpoint, sampling
// parameters, the conversation session, and tool-calling configuration.
type CompletionRequest struct {
	APIKey  string        // API key for the completion endpoint
	BaseURL string        // base URL of the API (e.g. an OpenAI-compatible endpoint)
	Timeout time.Duration // per-request timeout
	Temperature float32 // sampling temperature
	TopP        float32 // nucleus-sampling cutoff
	Model       string  // model identifier to request
	MaxTokens   int     // completion token limit
	Session      Session       // conversation history the request is built from
	ToolRegistry *ToolRegistry // available tools for function calling
	ToolsEnabled bool          // whether tool calls should be offered to the model
	Stream       bool          // use the streaming API variant when true
}
// NewCompletionRequest assembles a CompletionRequest from the chat
// context: API settings and sampling parameters come from the config,
// the session from the context, and the tool registry from the system.
func NewCompletionRequest(ctx ChatContextInterface) *CompletionRequest {
	cfg := ctx.GetConfig()

	req := &CompletionRequest{}

	// API / transport settings.
	req.APIKey = cfg.API.Key
	req.BaseURL = cfg.API.URL
	req.Timeout = cfg.API.Timeout
	req.Stream = cfg.API.Stream

	// Model and sampling parameters.
	req.Model = cfg.Model.Model
	req.MaxTokens = cfg.Model.MaxTokens
	req.Temperature = cfg.Model.Temperature
	req.TopP = cfg.Model.TopP

	// Conversation state and tool support.
	req.Session = ctx.GetSession()
	req.ToolsEnabled = cfg.Bot.ToolsEnabled
	req.ToolRegistry = ctx.GetSystem().GetToolRegistry()

	return req
}
// StreamResponse wraps a single streamed choice from the OpenAI client
// so the rest of the program does not depend on the library type
// directly in channel signatures.
type StreamResponse struct {
	ai.ChatCompletionStreamChoice
}
// CompleteWithText appends msg to the session as a user message and
// kicks off a completion, returning the channel of reply text chunks.
func CompleteWithText(ctx ChatContextInterface, msg string) (<-chan string, error) {
	userMsg := ai.ChatCompletionMessage{
		Role:    ai.ChatMessageRoleUser,
		Content: msg,
	}

	// Log a truncated preview of the prompt before recording it.
	log.Printf("complete: %s %.64s...", userMsg.Role, userMsg.Content)

	session := ctx.GetSession()
	session.AddMessage(userMsg)

	return complete(ctx)
}
// complete runs one completion round against the configured LLM and
// returns a channel of output text chunks. It fans the raw stream
// through a Chunker, which splits it into text, tool-call, and
// session-message channels; tool calls are executed (recursively
// triggering another completion) and their output is forwarded inline.
// The returned channel is closed once the text and tool channels are
// exhausted.
func complete(ctx ChatContextInterface) (<-chan string, error) {
	session := ctx.GetSession()
	config := ctx.GetConfig()
	sys := ctx.GetSystem()
	req := NewCompletionRequest(ctx)
	llm := sys.GetLLM()
	var respChan <-chan StreamResponse
	var err error
	if req.Stream {
		respChan, err = llm.ChatCompletionStreamTask(ctx, req)
	} else {
		respChan, err = llm.ChatCompletionTask(ctx, req)
	}
	if err != nil {
		log.Printf("error completing chat: %v", err)
		return nil, err
	}
	textChan, toolChan, msgChan := NewChunker(config).FilterTask(respChan)
	outputChan := make(chan string, 10)
	go func() {
		defer close(outputChan)
		for {
			select {
			case toolCall, ok := <-toolChan:
				if !ok {
					toolChan = nil
				} else {
					toolch, err := handleToolCall(ctx, toolCall)
					if err != nil {
						// BUG FIX: the error was previously discarded; on
						// failure handleToolCall returns a nil channel, and
						// ranging over a nil channel blocks forever, leaking
						// this goroutine and never closing outputChan.
						log.Printf("error handling tool call: %v", err)
					} else {
						for r := range toolch {
							outputChan <- r
						}
					}
				}
			case reply, ok := <-textChan:
				if !ok {
					textChan = nil
				} else {
					outputChan <- string(reply)
				}
			case msg, ok := <-msgChan:
				if !ok {
					msgChan = nil
				} else {
					session.AddMessage(*msg)
				}
			}
			// Exit once text and tool output are done. NOTE(review): msgChan
			// is intentionally not part of this condition (matches original
			// behavior); confirm the Chunker closes msgChan no later than the
			// other two, otherwise a trailing session message could be dropped.
			if toolChan == nil && textChan == nil {
				break
			}
		}
	}()
	return outputChan, nil
}
// handleToolCall looks up and executes the tool named in toolCall,
// records both the assistant's tool-call message and the tool's result
// in the session, and then starts a follow-up completion so the model
// can react to the tool output. The follow-up's text channel is
// returned to the caller.
func handleToolCall(ctx ChatContextInterface, toolCall *ai.ToolCall) (<-chan string, error) {
	log.Printf("Tool Call Received: %v", toolCall)

	registry := ctx.GetSystem().GetToolRegistry()
	tool, err := registry.GetToolByName(toolCall.Function.Name)
	if err != nil {
		log.Printf("Error getting tool registration: %v", err)
		return nil, err
	}

	// Execution errors are logged but not fatal: the (possibly empty)
	// result message is still recorded so the conversation can continue.
	result, execErr := tool.Execute(ctx, *toolCall)
	if execErr != nil {
		log.Printf("error executing tool: %v", execErr)
	}

	session := ctx.GetSession()
	session.AddMessage(ai.ChatCompletionMessage{
		Role:      ai.ChatMessageRoleAssistant,
		ToolCalls: []ai.ToolCall{*toolCall},
	})
	session.AddMessage(result)

	return complete(ctx)
}