-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathscript.js
35 lines (35 loc) · 1.4 KB
/
script.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
// Grab the <video> element that will display the webcam stream.
const video = document.getElementById('video')

// Load all face-api.js models in parallel, then start the webcam.
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  // recognize all the different parts like eyes, mouth.. etc
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  // recognize feelings like being happy, smiling, sad.. etc
  faceapi.nets.faceExpressionNet.loadFromUri('/models'),
])
  .then(startVideo)
  // Without this, a failed model download is an unhandled rejection
  // and the page silently never starts the camera.
  .catch((err) => console.error('Failed to load face-api models:', err))
/**
 * Request webcam access and pipe the stream into the video element.
 *
 * Uses the standard promise-based `navigator.mediaDevices.getUserMedia`;
 * the original callback-style `navigator.getUserMedia` is deprecated and
 * has been removed from modern browsers, so the demo would not start.
 * Errors (permission denied, no camera) are logged, as before.
 */
function startVideo() {
  navigator.mediaDevices
    .getUserMedia({ video: {} })
    .then((stream) => {
      video.srcObject = stream
    })
    .catch((err) => console.error(err))
}
// Once the webcam stream starts playing, lay a canvas over the video and
// keep it refreshed with detection boxes, landmarks, and expression labels.
video.addEventListener('play',() => {
  // Canvas sized from the video so drawings line up with the stream.
  const overlay = faceapi.createCanvasFromMedia(video)
  document.body.append(overlay)

  const dims = {
    width: video.width,
    height: video.height
  }
  faceapi.matchDimensions(overlay, dims)

  // Re-run detection ten times per second and redraw the overlay.
  setInterval(async () => {
    const options = new faceapi.TinyFaceDetectorOptions()
    const faces = await faceapi
      .detectAllFaces(video, options)
      .withFaceLandmarks()
      .withFaceExpressions()

    // Scale detection coordinates to the displayed video size.
    const scaled = faceapi.resizeResults(faces, dims)

    overlay.getContext('2d').clearRect(0, 0, overlay.width, overlay.height)
    faceapi.draw.drawDetections(overlay, scaled)
    faceapi.draw.drawFaceLandmarks(overlay, scaled)
    faceapi.draw.drawFaceExpressions(overlay, scaled)
  }, 100)
})