test_pretrained_model.py
"""
How to run:
python3 test_pretrained_model.py --tokenizer_folder pretrained_models --threshold 0.5 --model_dir pretrained_models --website_to_test https://www.google.com
"""
# Basic libraries
import sys
import math
import collections

import requests
import numpy as np
from joblib import load
from tokenizers import ByteLevelBPETokenizer
# Parsing arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--tokenizer_folder", type=str, default="pretrained_models", help="Folder containing the pretrained tokenizer vocab and merges files")
parser.add_argument("--threshold", type=float, default=0.5, help="Probability threshold above which a website is labeled phishing")
parser.add_argument("--model_dir", type=str, default="pretrained_models", help="Directory containing the trained model and document frequency dictionary")
parser.add_argument("--website_to_test", type=str, default="https://www.google.com", help="URL of the website to test")
args = parser.parse_args()
tokenizerFolder = args.tokenizer_folder
savedModelDirectory = args.model_dir
websiteToTest = args.website_to_test
threshold = args.threshold
# Load tokenizer files
tokenizer = ByteLevelBPETokenizer(
tokenizerFolder + "/pretrained_Tokenizer-10000.tok-vocab.json",
tokenizerFolder + "/pretrained_Tokenizer-10000.tok-merges.txt",
)
tokenizerVocabSize = tokenizer.get_vocab_size()
print("Tokenizer files have been loaded and the vocab size is %d..." % tokenizerVocabSize)
# Load saved model
model = load(savedModelDirectory + "/pretrained-phishytics-model.joblib")
print("Model loaded...")
# Load document frequency dictionary
docDict = np.load(savedModelDirectory + "/document-frequency-dictionary.npy", allow_pickle=True).item()
print("Document frequency dictionary loaded...")
# Testing
print("Loading webpage...")
try:
    response = requests.get(websiteToTest, timeout=30)
    webpageHtml = response.text.replace("\n", " ")
    # Tokenize the raw HTML and count how often each token id occurs (term frequencies)
    output = tokenizer.encode(webpageHtml)
    outputDict = collections.Counter(output.ids)
except Exception as e:
    print("**** Error loading the website ****")
    print(e)
    sys.exit(1)
# Apply TF-IDF weighting
totalFilesUnderConsideration = 18500  # total number of documents/HTML files in the training data
array = [0] * tokenizerVocabSize
for item in outputDict:
    # Skip tokens with a document frequency of zero (never seen in training documents)
    if item in docDict and len(docDict[item]) > 0:
        array[item] = outputDict[item] * math.log10(totalFilesUnderConsideration / len(docDict[item]))
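# Worked example: a token that occurs 3 times on the page and appears in 185 of
# the 18,500 training documents gets weight 3 * log10(18500 / 185) = 3 * 2.0 = 6.0.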
# Getting predictions
predictionProbability = model.predict_proba([array])[0][1]
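# predict_proba returns [P(not phishing), P(phishing)] per sample; [0][1] selects the
# phishing-class probability of our single feature vector (assuming class 1 was the
# phishing label during training).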
print("\n****************************\n--> Probability that the website is phishing: %.2f" % (predictionProbability * 100))
prediction = "NOT PHISHING"
if predictionProbability > threshold:
prediction = "PHISHING"
print("--> Based on your threshold of %.2f, this website is +++'%s'+++" % (threshold, prediction))
print("****************************")