-
Notifications
You must be signed in to change notification settings - Fork 979
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat: Codex vuln detector - devtooligan updates #1499
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -7,6 +7,7 @@ | |
|
||
logger = logging.getLogger("Slither") | ||
|
||
VULN_FOUND = "VULN_FOUND" | ||
|
||
class Codex(AbstractDetector): | ||
""" | ||
|
@@ -52,29 +53,63 @@ def _detect(self) -> List[Output]: | |
openai.api_key = api_key | ||
|
||
for contract in self.compilation_unit.contracts: | ||
prompt = "Is there a vulnerability in this solidity contracts?\n" | ||
if self.slither.codex_contracts != "all" and contract.name not in self.slither.codex_contracts.split(","): | ||
continue | ||
prompt = "Analyze this Solidity contract and find the vulnerabilities. If you find any vulnerabilities, begin the response with {}".format(VULN_FOUND) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We could probably parameterize the prompt. Also, maybe we could just always display the Codex response, even if it doesn't find anything. This would simplify it so we don't need to adjust the prompt or look for a special word in the response that indicates something was found. |
||
src_mapping = contract.source_mapping | ||
content = contract.compilation_unit.core.source_code[src_mapping.filename.absolute] | ||
start = src_mapping.start | ||
end = src_mapping.start + src_mapping.length | ||
prompt += content[start:end] | ||
answer = openai.Completion.create( # type: ignore | ||
model="text-davinci-003", prompt=prompt, temperature=0, max_tokens=200 | ||
) | ||
|
||
if "choices" in answer: | ||
if answer["choices"]: | ||
if "text" in answer["choices"][0]: | ||
if "Yes," in answer["choices"][0]["text"]: | ||
info = [ | ||
"Codex detected a potential bug in ", | ||
contract, | ||
"\n", | ||
answer["choices"][0]["text"], | ||
"\n", | ||
] | ||
|
||
res = self.generate_result(info) | ||
results.append(res) | ||
|
||
logging.info("Querying OpenAI") | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I wasn't 100% sure what to do with logging/printing. I noticed in this file we used the logger but it didn't display anything when Slither was run. And I noticed in other parts of the tool we use |
||
print("Querying OpenAI") | ||
answer = "" | ||
res = {} | ||
try: | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I added this try/catch because I was hitting the max token limit with a large contract in the prompt. Another reason to consider running this per-function. |
||
res = openai.Completion.create( # type: ignore | ||
prompt=prompt, | ||
model=self.slither.codex_model, | ||
temperature=self.slither.codex_temperature, | ||
max_tokens=self.slither.codex_max_tokens, | ||
) | ||
except Exception as e: | ||
print("OpenAI request failed: " + str(e)) | ||
logging.info("OpenAI request failed: " + str(e)) | ||
|
||
""" OpenAI completion response shape example: | ||
{ | ||
"choices": [ | ||
{ | ||
"finish_reason": "stop", | ||
"index": 0, | ||
"logprobs": null, | ||
"text": "VULNERABILITIES:. The withdraw() function does not check..." | ||
} | ||
], | ||
"created": 1670357537, | ||
"id": "cmpl-6KYaXdA6QIisHlTMM7RCJ1nR5wTKx", | ||
"model": "text-davinci-003", | ||
"object": "text_completion", | ||
"usage": { | ||
"completion_tokens": 80, | ||
"prompt_tokens": 249, | ||
"total_tokens": 329 | ||
} | ||
} """ | ||
|
||
if len(res.get("choices", [])) and VULN_FOUND in res["choices"][0].get("text", ""): | ||
# remove VULN_FOUND keyword and cleanup | ||
answer = res["choices"][0]["text"].replace(VULN_FOUND, "").replace("\n", "").replace(": ", "") | ||
|
||
if len(answer): | ||
info = [ | ||
"Codex detected a potential bug in ", | ||
contract, | ||
"\n", | ||
answer, | ||
"\n", | ||
] | ||
|
||
res = self.generate_result(info) | ||
results.append(res) | ||
return results |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Above in line 40, the
import openai
inside of this function was problematic when I tried to extract the OpenAI query logic into a separate function. I think it scopes the import to the function so I wasn't able to access it. Maybe we can move the
import
(wrapped in a try/except) to the top level? We could still do something here that checks whether it's been installed.