-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.js
146 lines (123 loc) · 3.8 KB
/
index.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import * as poseDetection from '@tensorflow-models/pose-detection';
import '@tensorflow/tfjs-backend-webgl';
// DOM handles: the webcam <video> element and the <canvas> the poses are drawn onto.
const video = document.getElementById('video');
const canvas = document.getElementById('output');
const ctx = canvas.getContext('2d');

// Populated by createDetector(); both are reassigned later, so they stay `let`.
let detector;
let model;

// Minimum confidence a keypoint must have before it (or a bone touching it) is rendered.
const scoreThreshold = 0.6;
/**
 * Loads the BlazePose model (tfjs runtime, "full" variant, smoothing enabled)
 * and stores the resulting detector in the module-level `detector` binding.
 */
async function createDetector() {
    model = poseDetection.SupportedModels.BlazePose;
    detector = await poseDetection.createDetector(model, {
        runtime: "tfjs",
        enableSmoothing: true,
        modelType: "full",
    });
}
/**
 * Requests the webcam stream, sizes the video/canvas to the real stream
 * dimensions once metadata arrives, mirrors the canvas, and starts the
 * prediction loop when the first frame is decodable.
 */
async function activateVideo() {
    if (navigator.mediaDevices.getUserMedia) {
        try {
            // FIX: constraint values must be numbers (not the strings '640'/'480'),
            // and the promise must be awaited — previously it floated, so callers
            // awaiting activateVideo() could proceed before the stream was attached.
            const stream = await navigator.mediaDevices.getUserMedia({
                video: { width: 640, height: 480 },
            });
            video.srcObject = stream;
        } catch (e) {
            // FIX: include the actual error instead of silently discarding it.
            console.log("Error occurred while getting the video stream", e);
        }
    }
    video.onloadedmetadata = () => {
        const videoWidth = video.videoWidth;
        const videoHeight = video.videoHeight;
        video.width = videoWidth;
        video.height = videoHeight;
        canvas.width = videoWidth;
        canvas.height = videoHeight;
        // Because the image from camera is mirrored, need to flip horizontally.
        ctx.translate(videoWidth, 0);
        ctx.scale(-1, 1);
    };
    video.addEventListener("loadeddata", predictPoses);
}
/**
 * One iteration of the render loop: estimate poses for the current video
 * frame, repaint the (mirrored) frame, overlay keypoints and skeleton,
 * then schedule the next iteration via requestAnimationFrame.
 */
async function predictPoses() {
    let poses = null;
    if (detector != null) {
        try {
            poses = await detector.estimatePoses(video, { flipHorizontal: false });
        } catch (error) {
            // Tear the detector down so subsequent frames stop re-triggering the failure.
            detector.dispose();
            detector = null;
            alert(error);
        }
    }
    // Paint the camera frame first, then draw the overlays on top of it.
    ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight);
    for (const pose of poses ?? []) {
        if (pose.keypoints != null) {
            drawKeypoints(pose.keypoints);
            drawSkeleton(pose.keypoints);
        }
    }
    window.requestAnimationFrame(predictPoses);
}
/**
 * Renders every keypoint of a pose as a green circle with a white outline.
 * @param {Array} keypoints - keypoint list from one estimated pose.
 */
function drawKeypoints(keypoints) {
    ctx.fillStyle = 'Green';
    ctx.strokeStyle = 'White';
    ctx.lineWidth = 2;
    for (const keypoint of keypoints) {
        drawKeypoint(keypoint);
    }
}
/**
 * Draws one keypoint as a filled + stroked circle of radius 4, but only
 * when its confidence score clears scoreThreshold.
 */
function drawKeypoint(keypoint) {
    const radius = 4;
    // Negated form keeps the original semantics (a NaN score is skipped).
    if (!(keypoint.score >= scoreThreshold)) {
        return;
    }
    const circle = new Path2D();
    circle.arc(keypoint.x, keypoint.y, radius, 0, 2 * Math.PI);
    ctx.fill(circle);
    ctx.stroke(circle);
}
/* function drawKeypointsColor(keypoints) {
const keypointInd = poseDetection.util.getKeypointIndexBySide(model);
ctx.strokeStyle = 'White';
ctx.lineWidth = 2;
ctx.fillStyle = 'Red';
for (const i of keypointInd.middle) {
drawKeypoint(keypoints[i]);
}
ctx.fillStyle = 'Green';
for (const i of keypointInd.left) {
drawKeypoint(keypoints[i]);
}
ctx.fillStyle = 'Orange';
for (const i of keypointInd.right) {
drawKeypoint(keypoints[i]);
}
} */
/**
 * Draws the pose skeleton: a white 2px line segment for every adjacent
 * keypoint pair of the model, skipping any bone whose endpoints fall
 * below scoreThreshold.
 */
function drawSkeleton(keypoints) {
    const color = "#fff";
    ctx.fillStyle = color;
    ctx.strokeStyle = color;
    ctx.lineWidth = 2;
    const pairs = poseDetection.util.getAdjacentPairs(model);
    for (const [i, j] of pairs) {
        const first = keypoints[i];
        const second = keypoints[j];
        if (first.score >= scoreThreshold && second.score >= scoreThreshold) {
            ctx.beginPath();
            ctx.moveTo(first.x, first.y);
            ctx.lineTo(second.x, second.y);
            ctx.stroke();
        }
    }
}
/**
 * Entry point: load the pose model, then start the camera/video pipeline.
 */
async function app() {
    // Load the model and create a detector object.
    await createDetector();
    // Enable camera and activate video.
    await activateVideo();
}

// FIX: the call was a floating promise — attach a rejection handler so
// startup failures (model load, camera access) are surfaced, not swallowed.
app().catch((err) => console.error("App failed to start:", err));