【PTChen's Version1】HandTrackingControlMouse_by_PTChen.py
import cv2
import mediapipe as mp
import pyautogui

webcam_id = 1
window_name = 'Hand Tracking Control Mouse Example'
desktop_width, desktop_height = pyautogui.size()

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands

# For webcam input:
hands = mp_hands.Hands(
    min_detection_confidence=0.7, min_tracking_confidence=0.5)
cap = cv2.VideoCapture(webcam_id, cv2.CAP_DSHOW)

# Create the window before the loop so cv2.getWindowImageRect() can be
# queried on the very first frame.
cv2.namedWindow(window_name)

idx_to_coordinates = {}
while cap.isOpened():
    success, image = cap.read()
    if not success:
        break

    # Flip the image horizontally for a selfie-view display, and convert
    # the BGR image to RGB for MediaPipe.
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)

    # To improve performance, optionally mark the image as not writeable to
    # pass by reference.
    image.flags.writeable = False
    results = hands.process(image)

    # Draw the hand annotations on the image.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
                image, hand_landmarks, mp_hands.HAND_CONNECTIONS)

            # Convert each normalized landmark to pixel coordinates in the frame.
            image_rows, image_cols, _ = image.shape
            idx_to_coordinates = {}
            for idx, landmark in enumerate(hand_landmarks.landmark):
                if landmark.visibility < 0 or landmark.presence < 0:
                    continue
                landmark_px = mp_drawing._normalized_to_pixel_coordinates(
                    landmark.x, landmark.y, image_cols, image_rows)
                if landmark_px:
                    idx_to_coordinates[idx] = landmark_px

            # Move the system cursor to the index fingertip (landmark 8),
            # offset by the preview window's position on the desktop.
            window_x, window_y, window_w, window_h = cv2.getWindowImageRect(window_name)
            if 8 in idx_to_coordinates:
                xpos, ypos = idx_to_coordinates[8]
                pyautogui.moveTo(window_x + xpos, window_y + ypos)
                # x = xpos / window_w * desktop_width
                # y = ypos / window_h * desktop_height

    cv2.imshow(window_name, image)
    if cv2.waitKey(5) & 0xFF == 27:  # Esc quits
        print(idx_to_coordinates)
        break

hands.close()
cap.release()
cv2.destroyAllWindows()
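
The two commented-out lines next to the cursor update hint at a second mapping mode: scaling the fingertip position to the whole desktop instead of moving the cursor only within the preview window. Below is a minimal sketch of that idea, reusing the script's window_w/window_h and desktop_width/desktop_height values; the helper name fingertip_to_desktop is made up here for illustration and is not part of the original script.

def fingertip_to_desktop(xpos, ypos, window_w, window_h,
                         desktop_width, desktop_height):
    """Scale a pixel position inside the preview window to desktop coordinates."""
    x = xpos / window_w * desktop_width
    y = ypos / window_h * desktop_height
    # Keep the cursor just inside the screen edges so PyAutoGUI's corner
    # fail-safe is not tripped by a subsequent call.
    x = min(max(x, 1), desktop_width - 2)
    y = min(max(y, 1), desktop_height - 2)
    return x, y

# Possible use inside the loop, in place of the moveTo() call above:
#   xpos, ypos = idx_to_coordinates[8]
#   x, y = fingertip_to_desktop(xpos, ypos, window_w, window_h,
#                               desktop_width, desktop_height)
#   pyautogui.moveTo(x, y)

This trades the window-relative mapping for full-screen reach; the scaling assumes the rectangle returned by cv2.getWindowImageRect matches the size of the displayed frame.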