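-- Starter prompts for model.nvim. Each entry below maps a prompt name to a
-- Prompt: a provider plus a builder that turns the current input and editor
-- context into request params. A minimal setup sketch (assuming the standard
-- model.nvim setup API; adjust the require path to wherever this module
-- lives):
--
--   require('model').setup({
--     prompts = require('model.prompts.starters'),
--   })
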
-- providers
local openai = require('model.providers.openai')
local anthropic = require('model.providers.anthropic')
local palm = require('model.providers.palm')
local huggingface = require('model.providers.huggingface')
local llamacpp = require('model.providers.llamacpp')
local together = require('model.providers.together')
local ollama = require('model.providers.ollama')

-- prompt helpers
local extract = require('model.prompts.extract')
local consult = require('model.prompts.consult')

-- utils
local util = require('model.util')
local async = require('model.util.async')
local prompts = require('model.util.prompts')
local mode = require('model').mode
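
-- Build a chat request that asks the model to fill in the <@@> marker:
-- a window of surrounding text (limited via limit_before_after), the selected
-- text when there is a visual selection, and any command args as the
-- instruction.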
local function code_replace_fewshot(input, context)
  local surrounding_text = prompts.limit_before_after(context, 30)

  local content = 'The code:\n```\n'
    .. surrounding_text.before
    .. '<@@>'
    .. surrounding_text.after
    .. '\n```\n'

  if context.selection then -- we only use input if we have a visual selection
    content = content .. '\n\nExisting text at <@@>:\n```' .. input .. '```\n'
  end

  if #context.args > 0 then
    content = content .. '\nInstruction: ' .. context.args
  end

  local messages = {
    {
      role = 'user',
      content = content,
    },
  }

  return {
    instruction = 'You are an expert programmer. You are given a snippet of code which includes the symbol <@@>. Complete the correct code that should replace the <@@> symbol given the content. Only respond with the code that should replace the symbol <@@>. If you include any other code, the program will fail to compile and the user will be very sad.',
    fewshot = {
      {
        role = 'user',
        content = 'The code:\n```\nfunction greet(name) { console.log("Hello " <@@>) }\n```\n\nExisting text at <@@>: `+ nme`',
      },
      {
        role = 'assistant',
        content = '+ name',
      },
    },
    messages = messages,
  }
end
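
-- The fewshot above primes the model with one worked example (correcting the
-- `nme` typo to `name`) so it answers with bare replacement code rather than
-- prose or a fenced block.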
---@type table<string, Prompt>
local locals = {
  ['llamacpp:zephyr'] = {
    provider = llamacpp,
    options = {
      model = 'zephyr-7b-beta.Q5_K_M.gguf',
      args = {
        '-c',
        8192,
        '-ngl',
        35,
      },
    },
    builder = function(input, context)
      return {
        prompt = '<|system|>'
          .. (context.args or 'You are a helpful assistant')
          .. '\n</s>\n<|user|>\n'
          .. input
          .. '</s>\n<|assistant|>',
      }
    end,
  },
  ['ollama:starling'] = {
    provider = ollama,
    params = {
      model = 'starling-lm',
    },
    builder = function(input)
      return {
        prompt = 'GPT4 Correct User: '
          .. input
          .. '<|end_of_turn|>GPT4 Correct Assistant: ',
      }
    end,
  },
}
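
-- Both entries above expect models served on the local machine:
-- `llamacpp:zephyr` assumes llama.cpp and the gguf weights are available
-- locally (the args request an 8192-token context with 35 layers offloaded
-- to the GPU), while `ollama:starling` assumes a running Ollama daemon with
-- `starling-lm` pulled.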
---@type table<string, Prompt>
local hosted = {
  hf = huggingface.default_prompt,
  ['together:stripedhyena'] = {
    provider = together,
    params = {
      model = 'togethercomputer/StripedHyena-Nous-7B', -- 32k model
      max_tokens = 1024,
    },
    builder = function(input)
      return {
        prompt = '### Instruction:\n' .. input .. '\n\n### Response:\n',
        stop = '</s>',
      }
    end,
  },
  ['together:phind/codellama34b_v2'] = {
    provider = together,
    params = {
      model = 'Phind/Phind-CodeLlama-34B-v2', -- 16k model
      max_tokens = 1024,
    },
    builder = function(input)
      return {
        prompt = '### System Prompt\nYou are an intelligent programming assistant\n\n### User Message\n'
          .. input
          .. '\n\n### Assistant\n',
      }
    end,
  },
  ['hf:starcoder'] = {
    provider = huggingface,
    options = {
      model = 'bigcode/starcoder',
    },
    builder = function(input)
      return { inputs = input }
    end,
  },
}
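
-- These call hosted inference APIs, so the matching credentials are assumed
-- to be present in the environment (e.g. TOGETHER_API_KEY or an HF token;
-- check each provider module for the exact variable it reads).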
---@type table<string, Prompt>
local closed = {
  gpt = openai.default_prompt,
  palm = palm.default_prompt,
  ['openai:gpt4-code'] = {
    provider = openai,
    mode = mode.INSERT_OR_REPLACE,
    params = {
      temperature = 0.2,
      max_tokens = 1000,
      model = 'gpt-4o',
    },
    builder = function(input, context)
      return openai.adapt(code_replace_fewshot(input, context))
    end,
    transform = extract.markdown_code,
  },
  ['anthropic:claude-code'] = {
    provider = anthropic,
    mode = mode.INSERT_OR_REPLACE,
    options = {
      headers = {
        ['anthropic-beta'] = 'max-tokens-3-5-sonnet-2024-07-15',
      },
      trim_code = true,
    },
    params = {
      max_tokens = 8192,
      model = 'claude-3-5-sonnet-latest',
      system = 'You are an expert programmer. Provide code which should go between the before and after blocks of code. Respond only with a markdown code block. Use comments within the code if explanations are necessary.',
    },
    builder = function(input, context)
      local format = require('model.format.claude')

      return vim.tbl_extend(
        'force',
        context.selection and format.build_replace(input, context)
          or format.build_insert(context),
        {
          -- TODO this makes it impossible to get markdown in the response content
          -- eventually we may want to allow markdown in the code-fenced response
          stop_sequences = { '```' },
        }
      )
    end,
  },
}
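
-- These target the OpenAI and Anthropic APIs and assume the usual keys
-- (OPENAI_API_KEY, ANTHROPIC_API_KEY) are available. Both use
-- mode.INSERT_OR_REPLACE, so a completion replaces the visual selection if
-- there is one and otherwise inserts at the cursor.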
---@type table<string, Prompt>
local closed_task = {
  commit = {
    provider = openai,
    mode = mode.INSERT,
    builder = function()
      local git_diff = vim.fn.system({ 'git', 'diff', '--staged' })

      if not git_diff:match('^diff') then
        error('Git error:\n' .. git_diff)
      end

      return {
        messages = {
          {
            role = 'user',
            content = 'Write a terse commit message according to the Conventional Commits specification. Try to stay below 80 characters total. Staged git diff: ```\n'
              .. git_diff
              .. '\n```',
          },
        },
      }
    end,
  },
  openapi = {
    -- Extract the relevant path from an OpenAPI spec and include it in the
    -- GPT request. Expects the schema URL as a command arg.
    provider = openai,
    mode = mode.BUFFER,
    builder = function(input, context)
      if context.args == nil or #context.args == 0 then
        error(
          'Provide the schema url as a command arg (:M openapi https://myurl.json)'
        )
      end

      local schema_url = context.args

      return function(build)
        async(function(wait, resolve)
          local schema = wait(extract.schema_descripts(schema_url, resolve))
          util.show(schema.description, 'got openapi schema')

          local route = wait(
            consult.gpt_relevant_openapi_schema_path(schema, input, resolve)
          )
          util.show(route.relevant_route, 'api relevant route')

          return {
            messages = {
              {
                role = 'user',
                content = 'API schema url: '
                  .. schema_url
                  .. '\n\nAPI description: '
                  .. route.schema.description
                  .. '\n\nRelevant path:\n'
                  .. vim.json.encode(route.relevant_route),
              },
              {
                role = 'user',
                content = input,
              },
            },
          }
        end, build)
      end
    end,
  },
}
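
-- Note the async builder shape used by `openapi` above: a builder may return
-- a function that receives `build` and calls it once params are resolved,
-- letting the request wait on the schema fetch and relevant-route lookup.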
local starters = vim.tbl_extend('force', locals, hosted, closed, closed_task)
return starters
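
-- Starters are invoked by name, e.g. `:M commit` or `:M openai:gpt4-code`;
-- `:M openapi https://myurl.json` passes the URL through as context.args.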