predict.py
import glob
import os

import cv2
import joblib  # sklearn.externals.joblib was removed from recent scikit-learn releases
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # registers the 3-d projection on older matplotlib
from scipy.cluster.vq import vq
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.svm import LinearSVC

# load the data produced by the training script
clf, features, classlist, slr, k, iterations = joblib.load("bof.pkl")
# SIFT needs the contrib build of OpenCV 3.x; OpenCV >= 4.4 also exposes cv2.SIFT_create()
sift = cv2.xfeatures2d.SIFT_create()
testpath = glob.glob('/this/is/your/path/to/test/*.png')
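# Assumed contents of "bof.pkl" (written by the companion training script, not shown here):
# the trained classifier `clf`, the training feature matrix `features`, the class names
# `classlist`, a fitted feature scaler `slr`, the vocabulary size `k`, and the k-means
# cluster centres `iterations` (one 128-dimensional visual word per row).
# Both '/this/is/your/path/to/test/' paths below are placeholders for your own test folder.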
# plot clusters (the visual-word centres, using their first three dimensions)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x, y, z = [], [], []
for i in iterations:
    x.append(i[0])
    y.append(i[1])
    z.append(i[2])
ax.scatter(x, y, z, zdir='z', s=100)
plt.show()
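# The 3-D scatter above shows only the first three of the 128 SIFT dimensions of each
# visual word, so it is a rough sanity check of the vocabulary rather than a full picture.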
imagepaths = []
classespath = "/this/is/your/path/to/test/"
deslist = []
imageclasses = []
paths = []
classlabels = []
classpaths = []
classid = 0
# note: this overwrites the classlist loaded from bof.pkl; the directory listing has to
# match the class order used during training for the predictions below to map correctly
classlist = os.listdir(classespath)
print(classlist)
# label images: one label per file in each class directory
deslist1 = np.zeros(shape=(128, 1))
for test in classlist:
    classdir = os.path.join(classespath, test)
    classpath = os.listdir(classdir)
    classpaths.append(classdir)
    classlabel = test
    imageclasses += [classlabel] * len(classpath)
    classlabels.append(classid)
    classid += 1
print(imageclasses)
print(classpaths)
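# Expected layout of the test folder, as implied by the loop above: one sub-directory per
# class, each holding that class's .png images, e.g.
#   /this/is/your/path/to/test/classA/*.png
#   /this/is/your/path/to/test/classB/*.png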
# get the descriptor vectors and append them together
for j in classpaths:
    p = glob.glob(j + "/*.png")
    for img in p:
        print(img)
        paths.append(img)
        im = cv2.imread(img)
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        kp, des = sift.detectAndCompute(gray, None)
        deslist.append(des)
# the per-image descriptor arrays have different lengths, so keep an object array
deslist = np.array(deslist, dtype=object)
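# Note: detectAndCompute() returns des=None for an image without keypoints; if that can
# happen with your data, a guard such as `if des is None: continue` (not in the original
# code) is needed before appending.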
imageclasses = np.asarray(imageclasses)
# build one bag-of-words histogram per image by assigning each descriptor to its nearest visual word
testfeatures = np.zeros((len(imageclasses), k), 'float32')
for i in range(len(imageclasses)):
    words, distance = vq(deslist[i], iterations)
    for w in words:
        testfeatures[i][w] += 1
oc = np.sum((testfeatures > 0) * 1.0, axis=0)
idf = np.array(np.log((1.0 * len(testpath) + 1) / (1.0 * oc + 1)), 'float32')
# scale the histograms with the scaler fitted during training
testfeatures = slr.transform(testfeatures)
h = 0.02
print(testfeatures.shape)
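# `oc`, `idf` and `h` are computed but never applied to `testfeatures` here, presumably
# mirroring an optional TF-IDF step of the training script; only the scaled histograms
# are passed to the classifier below.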
# create a prediction vector: map the predicted class indices back to class names
predictions = [classlist[i] for i in clf.predict(testfeatures)]
for test, prediction, i in zip(paths, predictions, range(len(imageclasses))):
    if prediction == imageclasses[i]:
        print("true")
    else:
        print("false")
    image = cv2.imread(test)
    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    pt = (0, 3 * image.shape[0] // 4)
    print(prediction)
    cv2.putText(image, prediction, pt, cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 2, [0, 255, 0], 2)
    cv2.imshow("Image", image)
    cv2.waitKey(3000)
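# Each test image is displayed for 3 seconds (waitKey(3000)); a final
# cv2.destroyAllWindows() call (not in the original code) would close the preview window
# once the loop finishes.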
# classification report and confusion matrix
print("classification report for classifier %s:\n%s\n"
      % (clf, metrics.classification_report(imageclasses, predictions)))
c = confusion_matrix(imageclasses, predictions)
print("confusion matrix: ")
print(c)
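# Rough usage sketch (assumed environment, not part of the original file):
#   pip install opencv-contrib-python scikit-learn scipy matplotlib joblib
#   then edit the two placeholder paths, keep bof.pkl next to this script, and run
#   `python predict.py`.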