Skip to content

Commit af6421a

Browse files
committed
working
1 parent e7c7aee commit af6421a

File tree

1 file changed

+100
-0
lines changed

1 file changed

+100
-0
lines changed

html/rps-tf/index.html

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
<!DOCTYPE html>
<html>

<head>
  <meta charset="utf-8">
  <title>Display Webcam Stream</title>

  <style>
    /* Fixed-size frame that holds the webcam video. */
    #videoContainer {
      margin: 0px auto;
      width: 500px;
      height: 375px;
      border: 1px #333 solid;
    }

    /* Mirror the feed horizontally so it reads like a mirror to the user. */
    #videoOutput {
      width: 500px;
      height: 375px;
      background-color: #666;
      -webkit-transform: scaleX(-1);
      transform: scaleX(-1);
    }
  </style>
</head>
25+
26+
<body onload="init()">
  <div id="videoContainer">
    <video autoplay id="videoOutput"></video>
  </div>

  <!-- TensorFlow.js runtime plus the MediaPipe handpose model (CDN builds). -->
  <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
  <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/handpose"></script>
  <script>
33+
/**
 * Page entry point (wired to <body onload>). Requests a webcam stream,
 * attaches it to the #videoOutput element, then starts hand detection
 * via main().
 */
function init() {
  const video = document.querySelector('#videoOutput');

  // navigator.mediaDevices is undefined in insecure (non-HTTPS, non-localhost)
  // contexts, so guard the object itself before touching getUserMedia —
  // otherwise this line throws a TypeError instead of reaching the else branch.
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ video: true })
      .then(function (stream) {
        video.srcObject = stream;
        main();
      })
      .catch(function (error) {
        // Include the actual error so permission denials and missing
        // devices are diagnosable from the console.
        console.log('Something went wrong!', error);
      });
  } else {
    console.log('no webcam!');
  }
}
49+
50+
/**
 * Runs one round of hand detection: loads the MediaPipe handpose model,
 * estimates hands from the live #videoOutput element, and logs every
 * detected landmark to the console.
 *
 * NOTE(review): this runs once, immediately after the stream is attached —
 * the video may not have rendered a frame yet, so predictions can be empty.
 */
async function main() {
  // Load the MediaPipe handpose model (downloads weights over the network).
  const model = await handpose.load();
  console.log('model loaded');

  // Pass in a video stream (or an image, canvas, or 3D tensor) to obtain a
  // hand prediction from the MediaPipe graph.
  const predictions = await model.estimateHands(document.querySelector('#videoOutput'));
  console.log(predictions);

  if (predictions.length > 0) {
    /*
    `predictions` is an array of objects describing each detected hand, e.g.:
    [
      {
        handInViewConfidence: 1, // The probability of a hand being present.
        boundingBox: {           // The bounding box surrounding the hand.
          topLeft: [162.91, -17.42],
          bottomRight: [548.56, 368.23],
        },
        landmarks: [             // The 3D coordinates of each hand landmark.
          [472.52, 298.59, 0.00],
          [412.80, 315.64, -6.18],
          ...
        ],
        annotations: {           // Semantic groupings of `landmarks`.
          thumb: [
            [412.80, 315.64, -6.18],
            [350.02, 298.38, -7.14],
            ...
          ],
          ...
        }
      }
    ]
    */

    for (let i = 0; i < predictions.length; i++) {
      const keypoints = predictions[i].landmarks;

      // Log hand keypoints. Use a distinct index `j` here — the original
      // inner loop redeclared `i`, shadowing the hand index above.
      for (let j = 0; j < keypoints.length; j++) {
        const [x, y, z] = keypoints[j];
        console.log(`Keypoint ${j}: [${x}, ${y}, ${z}]`);
      }
    }
  }
}
96+
97+
</script>
98+
</body>
99+
100+
</html>

0 commit comments

Comments
 (0)