Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add llama2 llm #1116

Closed
wants to merge 13 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ Thanks to these individuals for making reNgine awesome by fixing bugs, resolving
* [Suprita-25](https://github.com/Suprita-25)
* [TheBinitGhimire](https://github.com/TheBinitGhimire)
* [Vinay Leo](https://github.com/vinaynm)
* [Erdem Ozgen](https://github.com/ErdemOzgen)

*If you have created a Pull request, feel free to add your name here, because we know you are awesome and deserve thanks from the community!*

Expand Down
4 changes: 2 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
COMPOSE_PREFIX_CMD := COMPOSE_DOCKER_CLI_BUILD=1

COMPOSE_ALL_FILES := -f docker-compose.yml
SERVICES := db web proxy redis celery celery-beat
SERVICES := db web proxy redis celery celery-beat ollama

# --------------------------

Expand All @@ -20,7 +20,7 @@ setup: ## Generate certificates.

up: ## Build and start all services.
${COMPOSE_PREFIX_CMD} docker-compose ${COMPOSE_ALL_FILES} up -d --build ${SERVICES}

docker exec -it ollama ollama run llama2
build: ## Build all services.
${COMPOSE_PREFIX_CMD} docker-compose ${COMPOSE_ALL_FILES} build ${SERVICES}

Expand Down
12 changes: 12 additions & 0 deletions docker-compose.dev.yml
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,17 @@ services:
- celery-beat
networks:
- rengine_network
ollama:
image: ollama/ollama
container_name: ollama
volumes:
- ollama_data:/root/.ollama
ports:
- "11434:11434"
networks:
- rengine_network
restart: always
command: ["ollama", "run", "llama2-uncensored"]

networks:
rengine_network:
Expand All @@ -126,3 +137,4 @@ volumes:
github_repos:
wordlist:
scan_results:
ollama_data:
11 changes: 10 additions & 1 deletion docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,15 @@ services:
- scan_results:/usr/src/scan_results
networks:
- rengine_network

ollama:
image: ollama/ollama
container_name: ollama
volumes:
- ollama_data:/root/.ollama
ports:
- "11434:11434"
networks:
- rengine_network

networks:
rengine_network:
Expand All @@ -154,6 +162,7 @@ volumes:
wordlist:
scan_results:
static_volume:
ollama_data:

secrets:
proxy.ca:
Expand Down
2 changes: 2 additions & 0 deletions web/celery-entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,8 @@ exec "$@"
# httpx seems to have issue, use alias instead!!!
echo 'alias httpx="/go/bin/httpx"' >> ~/.bashrc

# for localgpt install langchain
python3 -m pip install langchain==0.0.343

# watchmedo auto-restart --recursive --pattern="*.py" --directory="/usr/src/app/reNgine/" -- celery -A reNgine.tasks worker --autoscale=10,0 -l INFO -Q scan_queue &
echo "Starting Workers..."
Expand Down
151 changes: 76 additions & 75 deletions web/reNgine/gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,16 @@
import re
from reNgine.common_func import get_open_ai_key, extract_between
from reNgine.definitions import VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE, ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT
from langchain.llms import Ollama

class GPTVulnerabilityReportGenerator:
    """Generate structured vulnerability reports from a short description.

    Uses OpenAI's chat API when an API key is configured, otherwise falls
    back to the local Ollama service (llama2-uncensored) defined in
    docker-compose (reachable at http://ollama:11434).
    """

    def __init__(self):
        self.api_key = get_open_ai_key()
        self.model_name = 'gpt-3.5-turbo'
        if not self.api_key:
            # No OpenAI key: target the ollama container as a drop-in LLM.
            self.ollama = Ollama(base_url='http://ollama:11434', model="llama2-uncensored")

    def get_vulnerability_description(self, description):
        """Generate Vulnerability Description using GPT (or local llama2).

        Args:
            description (str): Vulnerability name and/or short description.

        Returns:
            dict: On success::

                {
                    'status': True,
                    'description': str,
                    'impact': str,
                    'remediation': str,
                    'references': list,  # of https:// URLs
                }

            On failure: ``{'status': False, 'error': str}``.
        """
        if not self.api_key:
            # BUG FIX: the previous fallback built the prompt from the
            # *attack suggestion* system prompt and the undefined name
            # `input` (which resolves to the builtin, raising TypeError on
            # concatenation). Use the vulnerability-description system
            # message and the `description` argument instead.
            prompt = VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE + "\nUser: " + description
            try:
                response_content = self.ollama(prompt)
            except Exception as e:
                # Mirror the OpenAI branch: report failures as a dict
                # instead of letting a connection error propagate.
                return {
                    'status': False,
                    'error': str(e)
                }
        else:
            openai.api_key = self.api_key
            try:
                gpt_response = openai.ChatCompletion.create(
                    model=self.model_name,
                    messages=[
                        {'role': 'system', 'content': VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE},
                        {'role': 'user', 'content': description}
                    ]
                )
                response_content = gpt_response['choices'][0]['message']['content']
            except Exception as e:
                return {
                    'status': False,
                    'error': str(e)
                }

        # The system prompt instructs the model to answer in labelled
        # sections; extract each one case-insensitively.
        vuln_description_pattern = re.compile(
            r"[Vv]ulnerability [Dd]escription:(.*?)(?:\n\n[Ii]mpact:|$)",
            re.DOTALL
        )
        impact_pattern = re.compile(
            r"[Ii]mpact:(.*?)(?:\n\n[Rr]emediation:|$)",
            re.DOTALL
        )
        remediation_pattern = re.compile(
            r"[Rr]emediation:(.*?)(?:\n\n[Rr]eferences:|$)",
            re.DOTALL
        )

        description_section = extract_between(response_content, vuln_description_pattern)
        impact_section = extract_between(response_content, impact_pattern)
        remediation_section = extract_between(response_content, remediation_pattern)

        # BUG FIX: guard against a missing "References:" label — find()
        # returning -1 previously produced a nonsense slice of the reply.
        references_start_index = response_content.find("References:")
        if references_start_index == -1:
            references_section = ''
        else:
            references_section = response_content[references_start_index + len("References:"):].strip()

        url_pattern = re.compile(r'https://\S+')
        urls = url_pattern.findall(references_section)

        return {
            'status': True,
            'description': description_section,
            'impact': impact_section,
            'remediation': remediation_section,
            'references': urls,
        }

class GPTAttackSuggestionGenerator:
    """Suggest attacks for a reconnaissance summary.

    Uses OpenAI's chat API when an API key is configured, otherwise falls
    back to the local Ollama service (llama2-uncensored) defined in
    docker-compose (reachable at http://ollama:11434).
    """

    def __init__(self):
        self.api_key = get_open_ai_key()
        self.model_name = 'gpt-3.5-turbo'
        if not self.api_key:
            # No OpenAI key: target the ollama container as a drop-in LLM.
            self.ollama = Ollama(base_url='http://ollama:11434', model="llama2-uncensored")

    def get_attack_suggestion(self, input):
        """Return attack suggestions for the given input.

        Args:
            input (str): Reconnaissance summary for the LLM. (Parameter
                name shadows the builtin but is kept for backward
                compatibility with keyword callers.)

        Returns:
            dict: ``{'status': True, 'description': str, 'input': input}``
                on success, ``{'status': False, 'error': str,
                'input': input}`` on failure.
        """
        if not self.api_key:
            prompt = ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT + "\nUser: " + input
            # BUG FIX: mirror the OpenAI branch's error handling so a
            # down/unreachable ollama service returns an error dict
            # instead of raising an unhandled exception to the caller.
            try:
                response_content = self.ollama(prompt)
            except Exception as e:
                return {
                    'status': False,
                    'error': str(e),
                    'input': input
                }
        else:
            openai.api_key = self.api_key
            print(input)  # debug trace of the outgoing prompt
            try:
                gpt_response = openai.ChatCompletion.create(
                    model=self.model_name,
                    messages=[
                        {'role': 'system', 'content': ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT},
                        {'role': 'user', 'content': input}
                    ]
                )
                response_content = gpt_response['choices'][0]['message']['content']
            except Exception as e:
                return {
                    'status': False,
                    'error': str(e),
                    'input': input
                }
        return {
            'status': True,
            'description': response_content,
            'input': input
        }

1 change: 1 addition & 0 deletions web/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,4 @@ whatportis
weasyprint==53.3
wafw00f==2.2.0
xmltodict==0.13.0
langchain==0.0.343
Loading