rtFaceID.py
'''
Base code adapted from:
https://towardsdatascience.com/a-guide-to-face-detection-in-python-3eab0f6b9fc1
https://medium.com/@sebastiannorena/pca-principal-components-analysis-applied-to-images-of-faces-d2fc2c083371
'''
'''
Real-time face recognition script.
Before using this script, run trainFace.py to fit the PCA model and the classifiers.
Execute with python3.
'''
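# Usage sketch (assuming trainFace.py has already written pcaModel.bin,
# pcDict.bin, svcModel.bin and svcExpressionsModel.bin to the working
# directory, and that a webcam is available as device 0):
#
#   python3 rtFaceID.py
#
# Press 'q' in the video window to quit.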
import cv2
import matplotlib.pyplot as plt
import dlib
from imutils import face_utils
# from sklearn.decomposition import PCA
from rxPCA import PCA
import numpy as np
import pandas as pd
from joblib import load
dataLoc = "/Users/foorx/opencv/venv/lib/python3.6/site-packages/cv2/data"
cascPath = dataLoc + "/haarcascade_frontalface_default.xml"
eyePath = dataLoc + "/haarcascade_eye.xml"
smilePath = dataLoc + "/haarcascade_smile.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
eyeCascade = cv2.CascadeClassifier(eyePath)
smileCascade = cv2.CascadeClassifier(smilePath)
font = cv2.FONT_HERSHEY_SIMPLEX
video_capture = cv2.VideoCapture(0)
#Fitted PCA model
print("Loading PCA model from disk")
pcaModel = load('pcaModel.bin')
#Dictionary of person and principal components
print("Loading pcDict from disk")
pcDict = load('pcDict.bin')
personClassifierModel = load('svcModel.bin')
expressionClassifierModel = load('svcExpressionsModel.bin')
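# Note on the loaded artifacts (as assumed from trainFace.py): pcaModel is the
# fitted PCA used to project face vectors onto principal components; pcDict maps
# each person key to that person's stored principal components and may also carry
# the bookkeeping keys 'useZeroMean' and 'dim' (popped below); svcModel.bin and
# svcExpressionsModel.bin are SVC classifiers fitted on the same
# principal-component features, predicting person and expression labels.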
#get from pcDict if useZeroMean is enabled
useZeroMean = False
#tune this parameter to speed up/slow down SSD #
#set to None to use all principal components
maxPrincipalComponents = None
if 'useZeroMean' in pcDict:
    useZeroMean = pcDict['useZeroMean']
    del pcDict['useZeroMean']
#subimage dimension
#must match dimensions used to train PCA
#get subImage dimensions from pcDict
try:
    dim = pcDict['dim']
    print("dimensions detected: " + str(dim))
    del pcDict['dim']
except KeyError:
    dim = (100, 100)  #width, height
# unknownPersonErrorThreshold = 850000
unknownPersonErrorThreshold = 200000
skipFrameThreshold = 2
skipFrame = skipFrameThreshold-1
validClassificationMethod = ['SSD', 'SVM']
classificationMethod = 'SSD'
if classificationMethod not in validClassificationMethod:
    raise ValueError("classificationMethod must be in " + str(validClassificationMethod) + '!')
print('classificationMethod: ' + classificationMethod)
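# The two methods branch inside the main loop below: 'SSD' picks the person
# whose stored principal components have the smallest sum of squared
# differences to the current face's components, while 'SVM' lets the fitted
# classifiers predict person and expression and uses the same SSD distance
# only to reject faces farther than unknownPersonErrorThreshold from every
# known class.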
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    if not ret:
        # camera returned no frame; try again
        continue
    # Frame skipping only applies to the SVM path; SSD processes every frame
    if skipFrameThreshold <= 1 or classificationMethod == 'SSD':
        pass
    else:
        skipFrame += 1
        if skipFrame == skipFrameThreshold:
            skipFrame = 0
        if skipFrame != 0:
            continue
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.7,
        minNeighbors=3,
        minSize=(150, 150),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    #for debugging
    subImages = list()
    faceIVectors = list()  #list of face subImages in imagespace
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 3)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        # smile = smileCascade.detectMultiScale(
        #     roi_gray,
        #     scaleFactor=1.16,
        #     minNeighbors=35,
        #     minSize=(25, 25),
        #     flags=cv2.CASCADE_SCALE_IMAGE
        # )
        # for (sx, sy, sw, sh) in smile:
        #     cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (255, 0, 0), 2)
        #     cv2.putText(frame, 'Smile', (x + sx, y + sy), 1, 1, (0, 255, 0), 1)
        # eyes = eyeCascade.detectMultiScale(roi_gray)
        # for (ex, ey, ew, eh) in eyes:
        #     cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        #     cv2.putText(frame, 'Eye', (x + ex, y + ey), 1, 1, (0, 255, 0), 1)
        #######face identification#######
        # if len(faces):
        #     for (x, y, w, h) in faces:
        #side length of a square subimage, clipped to the frame boundaries
        ext = min([max([w, h]), np.shape(gray)[1] - x, np.shape(gray)[0] - y])
        # extract square subimage containing the face
        subImage = gray[y:(y+ext), x:(x+ext)]
        #resize subimage to the dimensions the PCA model was trained on
        subImage = cv2.resize(subImage, dim)
        #for debugging
        subImages.append(subImage)
        #vector of pixels for each face
        faceIVector = subImage.flatten('C')
        if useZeroMean:
            #apply zero mean to ignore brightness bias
            faceIVector = faceIVector - np.mean(faceIVector)
        #for debugging
        faceIVectors.append(faceIVector)
        #project the face vector onto the fitted principal components
        facePC = pcaModel.transform(pd.DataFrame(list(faceIVector)))
        if classificationMethod == 'SSD':
            #apply SSD (sum of squared differences) to guess the person
            guess = "Unknown person"
            currentError = float('inf')
            for person in pcDict:
                #SSD between the face's principal components and this person's stored components
                SSD = np.sum(np.subtract(facePC, pcDict[person]).flatten()**2)
                if SSD < currentError:
                    currentError = SSD
                    guess = person
            if currentError > unknownPersonErrorThreshold:
                guess = "Unknown person"
            guess = guess.split('_')[0]
            print("Guessed person: " + guess)
            print("SSD error: " + str(currentError))
            cv2.putText(frame, guess, (x, y), font, 1, (255, 0, 0), 5)
        if classificationMethod == 'SVM':
            #use the SVM models to classify person and expression
            #wrap the sample in a list before feeding it to the model
            personGuess = personClassifierModel.predict([facePC])
            #take first result from singleton list
            personGuess = personGuess[0]
            #wrap the sample in a list before feeding it to the model
            expressionGuess = expressionClassifierModel.predict([facePC])
            #take first result from singleton list
            expressionGuess = expressionGuess[0]
            #use SSD to check whether the face is too far from every known class
            currentError = float('inf')
            for person in pcDict:
                if personGuess in person:
                    #SSD between the face's principal components and this person's stored components
                    SSD = np.sum(np.subtract(facePC, pcDict[person]).flatten()**2)
                    if SSD < currentError:
                        currentError = SSD
            print('Squared Euclidean distance: ' + str(currentError))
            if currentError > unknownPersonErrorThreshold:
                personGuess = "Unknown person"
            print("Guessed person: " + personGuess)
            print("Guessed expression: " + expressionGuess)
            cv2.putText(frame, personGuess + ' (' + expressionGuess + ')', (x, y), font, 1, (255, 0, 0), 5)
        cv2.imshow('subImage', subImage)
print("Number of faces detected:")
print(len(faces))
cv2.putText(frame,'Number of Faces : ' + str(len(faces)),(40, 40), font, 1,(255,0,0),2)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()