1 '''
2 Created on Mar 27, 2012
3
4 @author: bolme
5 '''
6 import pyvision as pv
7 import os
8
9
10
11 import numpy as np
12
13
14 import pyvision.face.CascadeDetector as cd
15
# Coefficient column vector for the detection-quality logistic regression
# model (presumably pre-trained offline; see the train() method below which
# can refit it — TODO confirm provenance of these exact values).
WEIGHTS = np.array([[ 5.62758656e+00],
[ 1.43633799e+00],
[ 1.64466919e+01],
[ 1.39618695e+02],
[ 6.65913984e+00],
[ -2.92879480e+01],
[ 4.61796737e+00],
[ -5.88458117e+00],
[ 1.18577036e+03],
[ 1.17193154e+03],
[ 1.18310418e+03],
[ 1.00350301e+06],
[ 1.98506471e+06],
[ 2.01482871e+06],
[ 9.81550348e+05],
[ 1.99287479e+06],
[ 1.01131996e+06],
[ -5.82410452e+00]])
34
36 '''
37 A detector that uses multiple detectors and quality measures to accuratly
38 detect faces. The goal is to be slow but accurate.
39 '''
40
58
def detect(self,im,annotate=True):
    '''
    Run face detection and return a list of detections ordered from
    highest to lowest confidence score.

    @param im: the pv.Image to search for faces.
    @param annotate: unused here; kept for interface compatibility.
    @returns: a list of pv.Rect objects, each with a ``score`` (quality
        model output) and ``detector`` (source label) attribute set.
    '''
    detections = self.raw_detections(im)

    faces = []
    for each in detections:
        rect = each[0]

        # Score the detection with the quality (logistic regression) model
        # using the feature vector that follows [rect, label] in each entry.
        score = self.quality.predict(each[2:])
        rect.detector = each[1]
        rect.score = score[0]
        faces.append(rect)

    # Optionally append a zero-confidence fallback rectangle centered in
    # the image so that at least one detection is always returned.
    if self.default:
        w,h = im.size
        s = 0.75*min(w,h)
        default = pv.CenteredRect(0.5*w,0.5*h,s,s)
        default.score = 0.0
        default.detector = "DEFAULT"
        faces.append(default)

    # Best-scoring detections first.  key=/reverse= replaces the
    # deprecated cmp-style comparison (equivalent ordering, py2.4+).
    faces.sort(key=lambda face: face.score, reverse=True)

    return faces
89
91 '''
92 Run the face detectors with additional quality parameters.
93 '''
94 W,H = im.size
95
96 scale = 1.0/self.prescale
97 im = im.scale(self.prescale)
98
99 faces = self.fd(im)
100 faces = [[scale*rect,'FACE'] for rect in faces]
101
102 heads = self.hd(im)
103
104
105 hfaces = []
106 for each in heads:
107
108 x,y,w,_ = each.asCenteredTuple()
109 y = y - 0.10*w
110 w = 0.33*w
111 hfaces.append([scale*pv.CenteredRect(x,y,w,w),'HEAD'])
112
113
114 for face in faces:
115 best_overlap = 0.0
116 for head in hfaces:
117 best_overlap = max(best_overlap,face[0].similarity(head[0]))
118 if best_overlap > 0.7:
119 face.append(1.0)
120 else:
121 face.append(0.0)
122
123
124 for head in hfaces:
125 best_overlap = 0.0
126 for face in faces:
127 best_overlap = max(best_overlap,head[0].similarity(face[0]))
128 if best_overlap > 0.7:
129 head.append(1.0)
130 else:
131 head.append(0.0)
132
133 detections = faces + hfaces
134
135
136 for each in detections:
137 tile = pv.AffineFromRect(self.prescale*each[0],(128,128))(im)
138
139
140
141 each.append(1.0*(each[1] == 'FACE'))
142
143
144 each.append(np.sqrt(each[0].area())/np.sqrt(W*H))
145 each.append(np.sqrt(each[0].area())/np.sqrt(W*H)**2)
146
147
148 each.append(tile.asMatrix2D().std()/255.0)
149 each.append((tile.asMatrix2D().std()/255.0)**2)
150
151
152 each.append(tile.asMatrix2D().mean()/255.0)
153 each.append((tile.asMatrix2D().mean()/255.0)**2)
154
155
156 rgb = tile.asMatrix3D()
157 t = rgb.mean() + 0.001
158
159
160 r = -1+rgb[0,:,:].mean()/t
161 g = -1+rgb[1,:,:].mean()/t
162 b = -1+rgb[2,:,:].mean()/t
163
164
165 each += [r,g,b,r*r,r*g,r*b,g*g,g*b,b*b]
166
167 return detections
168
169
170
171 - def train(self, image_dir, eye_data):
172 '''
173 This function trains the logistic regression model to score the meta-detections.
174
175 Images must be oriented so that the face is upright.
176
177 @param image_dir: A pathname containing images.
178 @param eye_data: a list of tuples (from csv) filename,eye1x,eye1y,eye2x,eye2y
179 '''
180 print "Training"
181
182 data_set = []
183
184 progress = pv.ProgressBar(maxValue=len(eye_data))
185 for row in eye_data:
186 filename = row[0]
187 print "Processing",row
188 points = [float(val) for val in row[1:]]
189 eye1 = pv.Point(points[0],points[1])
190 eye2 = pv.Point(points[2],points[3])
191
192
193 ave_dist = np.abs(cd.AVE_LEFT_EYE.X() - cd.AVE_RIGHT_EYE.X())
194 y_height = 0.5*(cd.AVE_LEFT_EYE.Y() + cd.AVE_RIGHT_EYE.Y())
195 x_center = 0.5*(eye1.X() + eye2.X())
196 x_dist = np.abs(eye1.X() - eye2.X())
197 width = x_dist/ave_dist
198 y_center = 0.5*(eye1.Y() + eye2.Y()) + (0.5-y_height)*width
199 truth = pv.CenteredRect(x_center,y_center,width,width)
200
201
202 im = pv.Image(os.path.join(image_dir,filename))
203
204
205 detections = self.raw_detections(im)
206
207
208
209
210
211 scores = [truth.similarity(each[0]) for each in detections]
212
213 for i in range(len(scores)):
214 score = scores[i]
215 detection = detections[i]
216 success = 0.0
217 if score > 0.7:
218 success = 1.0
219 row = detection[1],success,detection[2:]
220 print row
221 data_set.append(row)
222
223
224 im = im.scale(self.prescale)
225 colors = {'FACE':'yellow','HEAD':'blue'}
226 for detection in detections:
227
228 rect = self.prescale*detection[0]
229 im.annotateRect(rect,color=colors[detection[1]])
230 im.annotateRect(self.prescale*truth,color='red')
231 progress.updateAmount()
232 progress.show()
233 print
234
235 progress.finish()
236 obs = [each[1] for each in data_set]
237 data = [each[2] for each in data_set]
238
239 print obs
240 print data
241
242 self.quality.train(obs,data)
243
244 return
245
246 for each in data_set:
247 self.quality[each[0]][1].append(each[1])
248 self.quality[each[0]][2].append(each[2])
249
250 for key,value in self.quality.iteritems():
251 print "Training:",key
252 obs = value[1]
253 data = value[2]
254 assert len(obs) == len(data)
255 value[0].train(obs,data)
256 print value[0].params
257
258 print "Done Training"
259
260
262 return self.detect(*args,**kwargs)
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278