SpamDetector.py
import nltk
from nltk import word_tokenize
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import cross_val_score
from imblearn.over_sampling import SMOTE
dataset = pd.read_csv('SMSSpamCollection.csv', sep='\t', names=['Label', 'Message'])
dataset['Message'] = dataset['Message'].str.replace(r'[^\w\s]', '', regex=True).str.lower()  # strip punctuation and lowercase
nltk.download('punkt')
nltk.download('stopwords')
dataset['Message'] = dataset['Message'].apply(word_tokenize)
stop_words = set(stopwords.words('english'))
dataset['Message'] = dataset['Message'].apply(lambda x: [word for word in x if word not in stop_words])
stemming = PorterStemmer()
dataset['Message'] = dataset['Message'].apply(lambda x: [stemming.stem(word) for word in x])
# Vectorization - join the tokens back into strings, then compute TF-IDF weights
# that capture how significant each word is across the messages.
dataset['Message'] = dataset['Message'].apply(lambda x: ' '.join(x))
vectorize = TfidfVectorizer() # TF-IDF matrix words in column, messages in row.
X = vectorize.fit_transform(dataset['Message'])
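# Optional sanity check (added illustration, not in the original script): X is a
# sparse document-term matrix with one row per message and one column per vocabulary term.
print(X.shape)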
# Encode the labels (spam / ham) as numbers so they can be used for classification.
encoder = LabelEncoder()
y = encoder.fit_transform(dataset['Label'])  # 'ham' -> 0, 'spam' -> 1 (classes are ordered alphabetically)
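# Optional sanity check (added illustration): LabelEncoder keeps the learned class
# order in encoder.classes_, so this prints which integer each label maps to.
print(dict(zip(encoder.classes_, encoder.transform(encoder.classes_))))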
# Train the model: split the data into train and test sets, then fit a (multinomial) Naive Bayes classifier.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Use SMOTE to oversample the minority class, since the data is imbalanced.
smote = SMOTE()
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
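# Optional sanity check (added illustration): after SMOTE both classes should have
# the same number of training samples.
print(pd.Series(y_train_smote).value_counts())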
model = MultinomialNB()
model.fit(X_train_smote, y_train_smote)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
folds = 5
scores = cross_val_score(model, X, y, cv=folds, scoring='accuracy')
# Output the results
print(f"Accuracy scores for each fold: {scores}")
print(f"Average accuracy: {scores.mean()}")
# Check the class distribution to confirm that the data is imbalanced.
label_c = dataset['Label'].value_counts()
print(label_c)
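# Minimal usage sketch (added illustration; classify_message is a hypothetical helper,
# not part of the original script). A new message must go through the same preprocessing
# as the training data before it is vectorized with the already-fitted TfidfVectorizer
# and classified by the trained model.
import re

def classify_message(text):
    text = re.sub(r'[^\w\s]', '', text).lower()
    tokens = [stemming.stem(word) for word in word_tokenize(text) if word not in stop_words]
    features = vectorize.transform([' '.join(tokens)])
    return encoder.inverse_transform(model.predict(features))[0]

print(classify_message("Congratulations! You have won a free prize, call now"))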