-
Notifications
You must be signed in to change notification settings - Fork 3
/
textTest.py
331 lines (271 loc) · 9.97 KB
/
textTest.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
# python eyeControl.py --shape-predictor /home/rohan/Downloads/shape_predictor_68_face_landmarks.dat
'''CHANGE THIS PATH TO YOUR LOCATION FOR THE PREDICTOR'''
# Absolute path to dlib's pre-trained 68-point facial-landmark model file.
SHAPE_PREDICTOR_PATH = r'/Users/shirley/Desktop/blinkception/shape_predictor_68_face_landmarks.dat'
# import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
import sys
import pickle
#from itertools import groupby
#import playground
import morse_code
import initialSetup
import faceRecognize
# import playground
# import sendEmail
# from sendEmail import sendEmail
# from sendEmail import sendWechatMessage
# from playground import interactElement, getCurrentElement, getNextElement
from selenium import webdriver
# --- one-time setup: browser, face recognition, per-user thresholds ---
driver = webdriver.Firefox()
driver.get('https://www.google.com/')

# Identify the current user via face recognition; falsy when unrecognized.
user = faceRecognize.is_recognized()

# Persistent per-user calibration store: {user: [BROW_EAR_THRESH, MOUTH_THRESH]}.
USER_THRESHOLDS_PATH = r'records/userThresholds.txt'
try:
    # NOTE: pickle is only safe here because the file is a local record
    # written by this program — never load pickles from untrusted sources.
    with open(USER_THRESHOLDS_PATH, 'rb') as f:
        userThresholds = pickle.load(f)
except (OSError, pickle.UnpicklingError, EOFError):
    # Missing, unreadable, or corrupt store: start with an empty table.
    # (The original crashed on a missing file and bare-excepted the rest.)
    userThresholds = {}

USER_EXISTS = False
CARRY_ON_SETUP = True
if user:
    USER_EXISTS = True
    if user in userThresholds:
        # Already calibrated: reuse the saved thresholds below.
        CARRY_ON_SETUP = False

setup1 = initialSetup.setup(SHAPE_PREDICTOR_PATH)
if CARRY_ON_SETUP and USER_EXISTS:
    # Interactive calibration of the eyebrow-raise and mouth-open thresholds.
    BROW_EAR_THRESH = setup1.browSetup(); time.sleep(1)
    MOUTH_THRESH = setup1.mouthSetup(); time.sleep(1)
    userThresholds[user] = [BROW_EAR_THRESH, MOUTH_THRESH]
    # BUG FIX: the original dumped to 'userThresholds.txt' in the CWD while
    # the load above reads 'records/userThresholds.txt', so calibrations
    # were never found on the next run. Write back to the same path, and
    # close the handle deterministically.
    with open(USER_THRESHOLDS_PATH, 'wb') as f:
        pickle.dump(userThresholds, f)
elif not USER_EXISTS:
    print('Please Register!!!!!')
    sys.exit()
else:
    # Known, calibrated user: restore the saved thresholds.
    BROW_EAR_THRESH, MOUTH_THRESH = userThresholds[user]
def eye_aspect_ratio(eye):
    """Eye aspect ratio (EAR) of a 6-point landmark contour.

    Points 1/5 and 2/4 form the two vertical pairs, points 0/3 the
    horizontal pair; the ratio drops toward 0 as the contour closes.
    """
    vertical_span = dist.euclidean(eye[1], eye[5]) + dist.euclidean(eye[2], eye[4])
    horizontal_span = dist.euclidean(eye[0], eye[3])
    return vertical_span / (2.0 * horizontal_span)
def distance(x, y):
    """Euclidean distance between 2-D points x and y, rounded to 3 decimals."""
    dx = x[0] - y[0]
    dy = x[1] - y[1]
    return round((dx * dx + dy * dy) ** 0.5, 3)
def avg(L):
    """Arithmetic mean of the numbers in L (L must be non-empty)."""
    total = sum(L)
    return total / len(L)
def std_dev(L):
    """Population standard deviation of L, rounded to 3 decimal places."""
    mean = sum(L) / len(L)
    variance = sum((value - mean) ** 2 for value in L) / len(L)
    return round(variance ** 0.5, 3)
#def patternFunc1():
# playground.fn1()
# construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-p", "--shape-predictor", required=True,
# help="path to facial landmark predictor")
#ap.add_argument("-v", "--video", type=str, default="",
# help="path to input video file")
#args = vars(ap.parse_args())
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 3
# BROW_EAR_THRESH=setup1.browSetup();time.sleep(1)
# BROW_EAR_THRESH=0.6261359380652183
# consecutive frames an eyebrow raise must persist before it counts
RAISE_AR_CONSEC_FRAMES = 3
# initialize the frame BLINK_COUNTERs and the total number of blinks
BLINK_COUNTER = 0  # consecutive frames with eyes below EYE_AR_THRESH
BLINK_TOTAL = 0  # blinks registered so far
RAISE_COUNTER=0  # consecutive frames with brows above the raise threshold
RAISE_TOTAL=0  # eyebrow raises registered so far
# MOUTH_THRESH=setup1.mouthSetup();time.sleep(1) #0.6553960565298503
# MOUTH_THRESH=0.6553960565298503
MOUTH_COUNTER=0  # consecutive frames with mouth open
MOUTH_TOTAL=0  # mouth-open events registered so far
MOUTH_AR_CONSEC_FRAMES=3
# BROW_RAISE_THRESHOLD=1
TIME=0  # seconds between the two most recent gesture events
beg,end=0,0  # timestamps bracketing the latest gesture event
# NOTE(review): the legend below looks stale — the main loop actually
# appends '.' for a blink, '-' for an eyebrow raise, '/' for mouth open.
'''
'r': eyebrow raise,
'b': blink
'/': mouth open
'''
pattern_list=[]  # morse symbols accumulated for the current character
currentWord=[]  # decoded characters of the word currently being typed
#pattern_dict={'brb':patternFunc1}
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
#print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
# Reuse the shape predictor already loaded by the setup helper.
predictor = setup1.predictor # dlib.shape_predictor(args["shape_predictor"])
# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# ... and for the eyebrows, nose and mouth regions
(lBrowStart,lBrowEnd)=face_utils.FACIAL_LANDMARKS_IDXS['left_eyebrow']
(rBrowStart,rBrowEnd)=face_utils.FACIAL_LANDMARKS_IDXS['right_eyebrow']
(noseStart,noseEnd)=face_utils.FACIAL_LANDMARKS_IDXS['nose']
(mouthStart,mouthEnd)=face_utils.FACIAL_LANDMARKS_IDXS['mouth']
# start the video stream thread
print("[INFO] starting video stream thread...")
# vs = FileVideoStream(args["video"]).start()
# fileStream = True
vs = VideoStream(src=0).start()  # default webcam
# vs = VideoStream(usePiCamera=True).start()
fileStream = False
time.sleep(1.0)  # give the camera sensor time to warm up
# Main gesture-to-morse loop: each frame, detect the face, score three
# gestures (blink '.', brow raise '-', mouth open '/'), accumulate symbols
# in pattern_list, and on a '/' terminator decode the morse character and
# type it into the browser. Mutates the module-level counters/state above.
# NOTE(review): indentation was reconstructed from an unindented paste —
# structure below is the most plausible reading; verify against upstream.
# loop over frames from the video stream
while True:
    # if this is a file video stream, then we need to check if
    # there any more frames left in the buffer to process
    if fileStream and not vs.more():
        break
    # grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale
    # channels)
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects = detector(gray, 0)
    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # extract the left and right eye coordinates, then use it to compute the eye aspect ratio (EAR)
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # values got from face_utils.FACIAL_LANDMARKS_IDXS
        leftBrowMid,leftEyeMid=25,47
        rightBrowMid,rightEyeMid=20,42
        distLeft=distance(shape[leftBrowMid],shape[leftEyeMid])
        distRight=distance(shape[rightBrowMid],shape[rightEyeMid])
        # print(distLeft,distRight)
        nose_mouth_dist=distance(shape[52],shape[34])
        leftBrow=shape[lBrowStart:lBrowEnd]
        rightBrow=shape[rBrowStart:rBrowEnd]
        # 6-point brow+eye contours so eye_aspect_ratio() can score a raise
        rightBrowEye=np.array([shape[i] for i in [17,18,20,21,36,39]])
        leftBrowEye=np.array([shape[i] for i in [22,23,25,26,45,42]])
        # 6-point mouth contour, same trick for mouth openness
        mouth=np.array([shape[i] for i in [48,50,52,54,56,58]])
        leftBrowEar=eye_aspect_ratio(leftBrowEye)
        rightBrowEar=eye_aspect_ratio(rightBrowEye)
        browEar=(leftBrowEar+rightBrowEar)/2.0
        # nose=shape[noseStart:noseEnd]
        # mouth=shape[mouthStart:mouthEnd]
        mouthEar=eye_aspect_ratio(mouth)
        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0
        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        leftBrowHull=cv2.convexHull(leftBrow)
        rightBrowHull=cv2.convexHull(rightBrow)
        leftBrowEyeHull=cv2.convexHull(leftBrowEye)
        rightBrowEyeHull=cv2.convexHull(rightBrowEye)
        mouthHull=cv2.convexHull(mouth)
        # noseHull=cv2.convexHull(nose)
        # mouthHull=cv2.convexHull(mouth)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [leftBrowHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightBrowHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [leftBrowEyeHull], -1, (14,237,255), 1)
        cv2.drawContours(frame, [rightBrowEyeHull], -1, (14,237,255), 1)
        cv2.drawContours(frame, [mouthHull], -1, (14,237,255), 1)
        # cv2.drawContours(frame, [noseHull], -1, (0, 255, 0), 1)
        # cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)
        '''for the eye'''
        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame BLINK_COUNTER
        if ear < EYE_AR_THRESH:
            BLINK_COUNTER += 1
        # otherwise, the eye aspect ratio is not below the blink
        # threshold
        else:
            # if the eyes were closed for a sufficient number of
            # then increment the total number of blinks
            if BLINK_COUNTER >= EYE_AR_CONSEC_FRAMES:
                BLINK_TOTAL += 1
                pattern_list.append('.')
                beg=end
                end=time.time()
                if (RAISE_TOTAL,BLINK_TOTAL) in [(1,0),(0,1)]:TIME=0 # assumed zero because we dont know the time of the previous blink
                else:TIME=end-beg
            # reset the eye frame BLINK_COUNTER
            BLINK_COUNTER = 0
        '''for the brow'''
        # BROW_EAR_THRESH is 0.7, setting a cap at 0.9 to prevent side-view weirdness
        if 0.9 > browEar > BROW_EAR_THRESH+0.1: # greater than the threshold here
            RAISE_COUNTER += 1
        else:
            # a sustained raise that just ended: record a morse dash
            if RAISE_COUNTER >= RAISE_AR_CONSEC_FRAMES:
                RAISE_TOTAL += 1
                pattern_list.append('-')
                beg=end
                end=time.time()
                if (RAISE_TOTAL,BLINK_TOTAL) in [(1,0),(0,1)]:TIME=0 # assumed zero because we dont know the time of the previous blink
                else:TIME=end-beg
            # reset the eye frame BLINK_COUNTER
            RAISE_COUNTER = 0
        '''for the mouth'''
        if mouthEar > (MOUTH_THRESH-0.2): # greater than the threshold here
            MOUTH_COUNTER += 1
        else:
            # a sustained mouth-open just ended: '/' terminates the character
            if MOUTH_COUNTER >= MOUTH_AR_CONSEC_FRAMES:
                MOUTH_TOTAL += 1
                pattern_list.append('/')
            MOUTH_COUNTER = 0
        # draw the total number of blinks on the frame along with
        # the computed eye aspect ratio for the frame
        cv2.putText(frame, "Blinks: {}".format(BLINK_TOTAL), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "Raises: {}".format(RAISE_TOTAL), (10, 50),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, "mouth_opens: {}".format(MOUTH_TOTAL), (10, 90),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        #time between blinks
        cv2.putText(frame, "Time: {:.2f}".format(TIME), (10, 70),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, " EAR: {:.2f}".format(ear), (300, 30), # ear aspect ratio
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(frame, " m_EAR: {:.2f}".format(mouthEar), (280, 50), # ear aspect ratio
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # a '/' terminator (or an over-long pattern) ends the current morse
    # character: decode it and type the result into the browser
    if (pattern_list and pattern_list[-1]=='/') or len(pattern_list) > 6:
        RAISE_TOTAL,BLINK_TOTAL=0,0
        message = "".join(pattern_list)[:-1] # drop the trailing '/' terminator
        print(message)
        if message in morse_code.inverseMorseAlphabet.keys():
            message = morse_code.decrypt(message)
            # NOTE(review): find_element_by_id was removed in Selenium 4 and
            # 'lst-ib' is an outdated Google search-box id — confirm the
            # pinned Selenium version and target page still match.
            driver.find_element_by_id('lst-ib').send_keys(message)
            if message !=' ':
                currentWord+=message
            else:
                # space marks end of word: flush and restart the buffer
                print('WORD: '+''.join(currentWord))
                currentWord=[]
            print(message)
            # interactElement(getCurrentElement(), word = message)
        pattern_list=[]
    if key == ord("q"):
        break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()