-
Notifications
You must be signed in to change notification settings - Fork 0
/
ASL.py
79 lines (68 loc) · 2.71 KB
/
ASL.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import cv2
import numpy as np
import util as ut
import svm_train as st
import re
# Train the SVM from scratch on every launch: svm.load() is broken in
# OpenCV 3.1.0, so a persisted model cannot be reloaded.
# https://github.com/Itseez/opencv/issues/4969
model = st.trainSVM(17)

# raw_input exists only on Python 2; fall back to input() so the script
# starts on either interpreter (the rest of the file is Py2-flavoured).
try:
    _prompt = raw_input
except NameError:  # Python 3
    _prompt = input
cam = int(_prompt("Enter Camera number: "))  # device index for VideoCapture
cap = cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX  # font for the on-screen predicted letter
def nothing(x):
    """No-op callback; OpenCV trackbar APIs require a callable even when unused."""
    return None
# Mutable recognition state shared by the main loop below.
text = " "  # accumulated recognized sentence (starts as a single space)
temp = 0  # count of consecutive frames showing the same label
previouslabel = None  # label predicted on the previous frame
previousText = " "  # NOTE(review): never read below -- presumably a leftover undo buffer; confirm
label = None  # most recent predicted letter
# Main capture loop: grab a frame, segment the hand by skin colour inside a
# fixed bounding box, classify the gesture with the SVM, and commit a letter
# to `text` once the same label has been held for 40 consecutive frames.
while cap.isOpened():
    # Read exactly one frame. The original tested the truthiness of the
    # (ret, frame) tuple (always True) and then read a second time,
    # discarding every other frame and never noticing end-of-stream.
    ret, img = cap.read()
    if not ret:
        break
    # Blue bounding box: the region the user must sign inside.
    cv2.rectangle(img, (350, 128), (600, 400), (255, 0, 0), 3)
    img1 = img[128:400, 350:600]
    img_ycrcb = cv2.cvtColor(img1, cv2.COLOR_BGR2YCR_CB)
    blur = cv2.GaussianBlur(img_ycrcb, (11, 11), 0)
    # Empirical skin-tone thresholds in YCrCb space.
    skin_ycrcb_min = np.array((0, 138, 67))
    skin_ycrcb_max = np.array((255, 173, 133))
    # Detect the hand in the bounding box via skin-colour masking.
    mask = cv2.inRange(blur, skin_ycrcb_min, skin_ycrcb_max)
    # findContours returns (image, contours, hierarchy) on OpenCV 3.x but
    # (contours, hierarchy) on 2.x/4.x; the contour list is always [-2],
    # so this works on every version (the 2-tuple unpack crashed on 3.x).
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, 2)[-2]
    cnt = ut.getMaxContour(contours, 3000)  # largest hand-like contour, if any
    try:
        has_hand = len(cnt) > 0
    except TypeError:
        # getMaxContour returned something without a length (e.g. None);
        # narrowed from the original bare except, which hid real errors.
        has_hand = False
    if has_hand:
        # Pass the trained model for prediction and fetch the best match.
        gesture, label = ut.getGestureImg(cnt, img1, mask, model)
        if label is not None:
            if temp == 0:
                previouslabel = label
            if previouslabel == label:
                previouslabel = label
                temp += 1
            else:
                temp = 0
            # A label held steady for 40 frames is committed to the text.
            if temp == 40:
                if label == 'P':  # 'P' is the "space" gesture
                    label = " "
                text = text + label
                if label == 'Q':  # 'Q' deletes the last word
                    words = re.split(" +", text)
                    words.pop()
                    text = " ".join(words)
                # Parenthesized print works on both Python 2 and 3.
                print(text)
        cv2.imshow('PredictedGesture', gesture)  # show the best match
        # Display the predicted letter on the main screen.
        cv2.putText(img, label, (50, 150), font, 3, (0, 0, 255), 2)
    cv2.imshow('Frame', img)
    cv2.imshow('Mask', mask)
    k = 0xFF & cv2.waitKey(10)
    if k == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()