inference.py
import instructor
import yaml
from dotenv import load_dotenv
from enum import Enum
from litellm import completion
from llm_code_eval import utils as ice
from pydantic import BaseModel, Field

load_dotenv()


# Structured output for the evaluation prompts: a single integer score.
class Score(BaseModel):
    score: int = Field()


# How a generated change should be applied to a file.
class FileMode(str, Enum):
    create = "create"
    overwrite = "overwrite"
    change = "change"


# A single edit: the exact original code to replace and its replacement.
class SnippetChange(BaseModel):
    original_snippet: str = Field(
        description="Empty in case of 'create' file mode. Otherwise: exact copy of the part of code you want to edit"
                    " (at least two lines, include correct spacing and indents like in the given source code).")
    changed_snippet: str = Field(
        description="Your changes to this piece of code. We will replace the original snippet with your updated snippet.")


# A file to modify, together with the snippet changes to apply to it.
class File(BaseModel):
    file_path: str = Field(
        description="Exact path of the file we need to change. Use the path structure as defined between '--',"
                    " starting with / and excluding the repo name.")
    mode: FileMode = Field(
        description="File mode should be one of:\n'change': change snippets in an existing file.\n'overwrite': "
                    "replace the content of an existing file completely.\n'create': create a new file.")
    changes: list[SnippetChange] = Field(
        description="Non-empty list of changes needed. Every SnippetChange should have changed_snippet code.")


# Top-level response model: all files to change for the given issue.
class Changes(BaseModel):
    file_to_change: list[File] = Field(
        description="List of files to change. Keep empty if it's not worth changing anything.")

    def __str__(self):
        result = ""
        for f in self.file_to_change:
            result += f"\n{f.file_path}\n"
            for change in f.changes:
                result += f"\n--- source\n{change.original_snippet}\n\n--- changed\n{change.changed_snippet}\n---\n"
        return result
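

# Illustrative rendering of str(Changes(...)) for a single hypothetical file and change
# (the path and snippet placeholders below are made up for illustration only):
#
#   /path/to/file.py
#
#   --- source
#   <original snippet>
#
#   --- changed
#   <updated snippet>
#   ---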


# Load configuration
def load_config(config_path: str = "config/config.yaml"):
    with open(config_path, "r") as file:
        config = yaml.safe_load(file)
    return config


# Get model based on the configuration
def get_model(config):
    return config['provider_name'] + "/" + config['model_name']
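

# A minimal sketch of the expected config/config.yaml, inferred from the keys read above
# (the actual file is not shown here; the values are assumptions):
#
#   provider_name: openai   # any provider supported by litellm
#   model_name: gpt-4o      # hypothetical model name
#
# get_model() then returns "openai/gpt-4o", the "provider/model" string passed to litellm.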


# Patch the litellm completion call so responses are parsed into Pydantic models.
client = instructor.from_litellm(completion)


class User(BaseModel):
    name: str
    age: int


# Ask the model for structured code changes that solve the given issue.
def run_inference(model, user):
    resp = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": "You are an experienced cloud/devops engineer. "
                           "Generate the necessary changes to solve this issue.",
            },
            {
                "role": "user",
                "content": user,
            }
        ],
        response_model=Changes,
    )
    return resp


# Score the generated output with a reference-free evaluation prompt from llm_code_eval.
def run_ice_prompt(model, aspect, problem, output):
    prompt = ice.TASK_PROMPTS["code-gen"][aspect]["reference-free"]
    prompt = prompt.replace("{{PROBLEM}}", problem).replace("{{OUTPUT}}", output)
    resp = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[
            {
                "role": "system",
                "content": prompt,
            }
        ],
        response_model=Score,
    )
    return resp.score
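

# A minimal usage sketch, assuming a config/config.yaml like the one outlined above and a
# hypothetical issue description; the repo's real entry point may wire these calls differently.
if __name__ == "__main__":
    config = load_config()
    model = get_model(config)

    issue = "Describe the infrastructure issue to solve here."  # hypothetical input
    changes = run_inference(model, issue)
    print(changes)  # Changes.__str__ prints the source/changed snippets per file

    # "usefulness" is assumed here to be one of llm_code_eval's code-gen aspects.
    score = run_ice_prompt(model, "usefulness", issue, str(changes))
    print(f"usefulness score: {score}")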