学习记录-利用TensorFlow.js实现人体识别

# 背景

一直在弄机器视觉,
关于人体识别方面的东西,
偶然发现 TensorFlow.js 的一个模型
PoseNet Model
有趣的事情就从此开始了

# 环境信息

首先将我的开发环境介绍一下
摄像头: RGB摄像头就可以
系统: Ubuntu 16.04
浏览器: Chrome Version 75.0.3770.100 (Official Build) (64-bit)
Node.js: 10.16.0
yarn: 1.17.3
npm: 6.9.0

我的代码是直接写在 HTML 里面的(通过 CDN 引入脚本),所以不需要 Node.js 环境

# 码代码

发现在官方的仓库已经有完整的代码和教程,
我就不放了
这是地址: https://github.com/tensorflow/tfjs-models/tree/master/posenet

# 参考资料:

用TensorFlow.js实现人体姿态估计模型(上):https://www.jianshu.com/p/b0bcedd88a8e

# 我的代码

任何不能复现的技术博客都是扯淡,
我把我的代码放一下吧,
也可以直接加群: 492781269
群文件里有
![群信息截图](20190722113903.png)

然后下面放全部的代码:


<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<!-- Load TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<!-- Load Posenet -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/posenet"></script>
<style>

#videoBox {
min-width: 100%;
min-height: 100%;
position: absolute;
top: 0;
left: 0;
}
#myVideo {
min-width: 100%;
min-height: 100%;
position: absolute;
transform: scaleX(-1);

}
#output {
position: absolute;
z-index: 3;
}
</style>
</head>

<body>
<div id="videoBox">
<video src="" id="myVideo" autoplay="autoplay"></video>
<canvas id="output" ></canvas>
</div>

<h1 id="myTitle">loading model......</h1>
</body>

<script>

// Grab the DOM handles the demo draws on: the mirrored <video> element,
// the overlay <canvas>, and its 2D drawing context.
const myVideo = document.querySelector("#myVideo");
const myCanvas = document.querySelector("#output");
const ctx = myCanvas.getContext('2d');
// Filled in once the PoseNet model finishes downloading; read by
// poseDetectionFrame() on every animation frame.
var net = {};

// Load PoseNet, hide the "loading model......" banner, then start the
// camera. A .catch is required here: without it a failed model download
// is an unhandled rejection and the banner stays up forever.
posenet.load()
.then((net1) => {
  document.querySelector("#myTitle").style.display = "none";
  net = net1;
  setupCamera();
})
.catch((err) => {
  document.querySelector("#myTitle").textContent = "failed to load model";
  console.error(err);
});

/**
 * Run one pose-estimation pass on the current video frame, draw the
 * detected keypoints onto the overlay canvas, and schedule the next pass.
 * Reads the file-level `net`, `myVideo`, `ctx` and `myCanvas`.
 */
function poseDetectionFrame() {

  net.estimateSinglePose(myVideo, {
    flipHorizontal: true // single-person mode; see the official example for multi-pose settings
  })
  .then((pose) => {
    let score = pose.score;
    let keypoints = pose.keypoints;
    // Only redraw when the whole-pose confidence is high enough;
    // otherwise keep the previous frame's dots on screen.
    if (score >= 0.2) {
      ctx.clearRect(0, 0, myCanvas.width, myCanvas.height);
      for (let i = 0; i < keypoints.length; i++) {
        const keypoint = keypoints[i];
        // Skip low-confidence individual keypoints.
        if (keypoint.score > 0.1) {
          const {y, x} = keypoint.position;
          drawPoint(ctx, y, x, 10, "red");
        }
      }
    }
  })
  // Without this .catch every failed frame is an unhandled rejection.
  .catch((err) => {
    console.error(err);
  });

  // Schedule the next frame unconditionally (same cadence as the original:
  // requestAnimationFrame is not gated on the estimate promise settling).
  requestAnimationFrame(poseDetectionFrame);

}

/**
 * Enumerate the attached cameras, open the last video input via WebRTC,
 * pipe the stream into the <video> element, size the canvas to match,
 * and kick off the pose-detection loop.
 */
function setupCamera() {

  let exArray = [];
  // Legacy getUserMedia shim (Chrome/Firefox/IE prefixes). NOTE(review):
  // this assignment is never used below — everything goes through
  // navigator.mediaDevices — but it is kept for backward compatibility.
  navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

  // Collect the deviceIds of every camera.
  navigator.mediaDevices.enumerateDevices()
  .then(function (sourceInfos) {
    for (var i = 0; i < sourceInfos.length; ++i) {
      if (sourceInfos[i].kind == 'videoinput') {
        exArray.push(sourceInfos[i].deviceId);
      }
    }
  })
  .then(() => {
    // The author's machine exposes three cameras (depth, grayscale, RGB);
    // the RGB one is last, so take the last deviceId.
    let deviceId = exArray[exArray.length - 1];

    navigator.mediaDevices.getUserMedia({
      audio: false,
      video: {
        deviceId: deviceId
      }
    })
    .then(stream => {
      // Feed the camera stream into the video element; once metadata is
      // available the element has a real size, so mirror it onto the
      // canvas and start the detection loop.
      myVideo.srcObject = stream;
      myVideo.onloadedmetadata = () => {
        myVideo.width = myVideo.offsetWidth;
        myVideo.height = myVideo.offsetHeight;
        myCanvas.width = myVideo.width;
        myCanvas.height = myVideo.height;
        poseDetectionFrame();
      };
    })
    .catch(err => {
      // BUG FIX: the original had a bare `console.log` (never invoked),
      // silently swallowing getUserMedia errors. Actually log them.
      console.log(err);
    });
  });
}

/**
 * Paint one keypoint as a solid dot on the overlay canvas.
 * @param {CanvasRenderingContext2D} ctx - target 2D drawing context
 * @param {number} y - vertical centre of the dot, in canvas pixels
 * @param {number} x - horizontal centre of the dot, in canvas pixels
 * @param {number} r - dot radius in pixels
 * @param {string} color - CSS fill colour
 */
function drawPoint(ctx, y, x, r, color) {
  const FULL_TURN = 2 * Math.PI;
  ctx.beginPath();
  ctx.arc(x, y, r, 0, FULL_TURN);
  ctx.fillStyle = color;
  ctx.fill();
}

</script>
</html>

PS:
如有错误,还请多多指出来~

– Nick
– 2019/07/22