1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34 from os.path import join
35 import unittest
36 import random
37 from math import pi
38 import os.path
39
40 import pyvision as pv
41 import numpy as np
42
43 from pyvision.face.CascadeDetector import CascadeDetector
44 from pyvision.analysis.face import EyesFile
45 from pyvision.point.PointLocator import SVMLocator,KRRLocator
46 from pyvision.analysis.FaceAnalysis.FaceDetectionTest import face_from_eyes, is_success
47 from pyvision.analysis.FaceAnalysis.EyeDetectionTest import EyeDetectionTest
48 from pyvision.vector import VectorClassifier
49 from pyvision.vector import SVM
50
51
53 '''
54 This class detects faces and then returns the detection rectangles and
55 the eye coordinates.
56 '''
57
58 - def __init__(self, face_detector=CascadeDetector(), tile_size=(128,128), validate=None, n_iter=1, annotate=False,**kwargs):
86
87
89 '''Train an eye detector givin a full image and the eye coordinates.'''
90
91
92 true_rect = face_from_eyes(left_eye,right_eye)
93
94
95 rects = self.face_detector.detect(im)
96
97
98 for pred_rect in rects:
99 if is_success(pred_rect,true_rect):
100
101 affine = pv.AffineFromRect(pred_rect,self.tile_size)
102
103 w,h = self.tile_size
104
105 if self.perturbations:
106
107 center = pv.AffineTranslate(-0.5*w,-0.5*h,self.tile_size)
108 rotate = pv.AffineRotate(random.uniform(-pi/8,pi/8),self.tile_size)
109 scale = pv.AffineScale(random.uniform(0.9,1.1),self.tile_size)
110 translate = pv.AffineTranslate(random.uniform(-0.05*w,0.05*w),
111 random.uniform(-0.05*h,0.05*h),
112 self.tile_size)
113 inv_center = pv.AffineTranslate(0.5*w,0.5*h,self.tile_size)
114
115 affine = inv_center*translate*scale*rotate*center*affine
116
117
118 cropped = affine.transformImage(im)
119 cropped = pv.meanStd(cropped)
120
121
122 leye = affine.transformPoint(left_eye)
123 reye = affine.transformPoint(right_eye)
124
125
126 self.training_labels.append((leye,reye))
127
128 self.normalize.addTraining(0.0,cropped)
129
130
131
132
133 return
134
135
136 self.detection_failures += 1
137
138 - def train(self,**kwargs):
139 '''
140 Train the eye locators.
141 '''
142 self.normalize.trainNormalization()
143
144 vectors = self.normalize.vectors
145
146
147
148
149 for i in range(len(self.training_labels)):
150 leye,reye = self.training_labels[i]
151 vec = vectors[i]
152
153 self.left_locator.addTraining(vec,leye)
154 self.right_locator.addTraining(vec,reye)
155
156 self.left_locator.train(**kwargs)
157 self.right_locator.train(**kwargs)
158
159 self.left_eye = self.left_locator.mean
160 self.right_eye = self.right_locator.mean
161
162 del self.normalize.labels
163 del self.normalize.vectors
164 del self.training_labels
165
166
168 '''
169 @returns: a list of tuples where each tuple contains (registered_image, detection_rect, left_eye, right_eye)
170 '''
171 result = []
172
173 rects = self.face_detector.detect(im)
174
175
176 for rect in rects:
177
178
179 affine = pv.AffineFromRect(rect,self.tile_size)
180 cropped = affine.transformImage(im)
181
182 for _ in range(self.n_iter):
183 cropped = pv.meanStd(cropped)
184
185 data = cropped.asMatrix2D().flatten()
186 data = np.array(data,'d').flatten()
187
188 data = self.normalize.normalizeVector(data)
189
190 pleye = self.left_locator.predict(data)
191 preye = self.right_locator.predict(data)
192
193 pleye = affine.invertPoint(pleye)
194 preye = affine.invertPoint(preye)
195
196
197 affine = pv.AffineFromPoints(pleye,preye,self.left_eye,self.right_eye,self.tile_size)
198 cropped = affine.transformImage(im)
199
200
201
202 reg = cropped
203
204 if self.validate != None and not self.validate(reg):
205
206 if self.annotate:
207 im.annotateRect(rect,color='red')
208 im.annotatePoint(pleye,color='red')
209 im.annotatePoint(preye,color='red')
210 continue
211
212 if self.annotate:
213 reg.annotatePoint(self.left_eye,color='green')
214 reg.annotatePoint(self.right_eye,color='green')
215 im.annotatePoint(pleye,color='green')
216 im.annotatePoint(preye,color='green')
217 im.annotateRect(rect,color='green')
218 result.append((reg,rect,pleye,preye))
219
220 return result
221
222
224 '''
225 This class detects faces and then returns the detection rectangles and
226 the eye coordinates.
227 '''
228
229 - def __init__(self, face_detector=CascadeDetector(), tile_size=(128,128), subtile_size=(32,32), left_center=pv.Point(39.325481787836871,50.756936769089975), right_center=pv.Point(91.461135538006289,50.845357457309881), validate=None, n_iter=1, annotate=False,**kwargs):
230 '''
231 Create an eye locator. This default implentation uses a
232 cascade classifier for face detection and then SVR for eye
233 location.
234 '''
235
236 self.face_detector = face_detector
237 self.left_center = left_center
238 self.right_center = right_center
239 self.tile_size = tile_size
240 self.subtile_size = subtile_size
241 self.validate = validate
242 self.n_iter = n_iter
243 self.annotate = annotate
244 self.perturbations = True
245
246
247 self.detection_failures = 0
248
249
250 self.createLocators(**kwargs)
251
252
254 ''' Create two point locators that use the methods of interest '''
255 raise NotImplementedError
256
257
259
260 affine = pv.AffineFromRect(detection,self.tile_size)
261
262 w,h = self.tile_size
263
264 if self.perturbations:
265
266 center = pv.AffineTranslate(-0.5*w,-0.5*h,self.tile_size)
267 rotate = pv.AffineRotate(random.uniform(-pi/8,pi/8),self.tile_size)
268 scale = pv.AffineScale(random.uniform(0.9,1.1),self.tile_size)
269 translate = pv.AffineTranslate(random.uniform(-0.05*w,0.05*w),
270 random.uniform(-0.05*h,0.05*h),
271 self.tile_size)
272 inv_center = pv.AffineTranslate(0.5*w,0.5*h,self.tile_size)
273
274 affine = inv_center*translate*scale*rotate*center*affine
275
276
277 lx=self.left_center.X()-self.subtile_size[0]/2
278 ly=self.left_center.Y()-self.subtile_size[1]/2
279 rx=self.right_center.X()-self.subtile_size[0]/2
280 ry=self.right_center.Y()-self.subtile_size[1]/2
281
282 laffine = pv.AffineFromRect(pv.Rect(lx,ly,self.subtile_size[0],self.subtile_size[1]),self.subtile_size)*affine
283 raffine = pv.AffineFromRect(pv.Rect(rx,ry,self.subtile_size[0],self.subtile_size[1]),self.subtile_size)*affine
284 return laffine,raffine
285
286
288 '''Train an eye detector givin a full image and the eye coordinates.'''
289
290
291 true_rect = face_from_eyes(left_eye,right_eye)
292
293
294 rects = self.face_detector.detect(im)
295
296
297 for pred_rect in rects:
298 if is_success(pred_rect,true_rect):
299
300 laffine,raffine = self.generateTransforms(pred_rect)
301
302 lcropped = laffine.transformImage(im)
303 rcropped = raffine.transformImage(im)
304
305
306 lcropped = pv.meanStd(lcropped)
307 rcropped = pv.meanStd(rcropped)
308
309
310 leye = laffine.transformPoint(left_eye)
311 reye = raffine.transformPoint(right_eye)
312
313
314 self.left_locator.addTraining(lcropped,leye)
315 self.right_locator.addTraining(rcropped,reye)
316
317
318 return
319
320
321 self.detection_failures += 1
322
323 - def train(self,**kwargs):
324 '''
325 Train the eye locators.
326 '''
327 self.left_locator.train(**kwargs)
328 self.right_locator.train(**kwargs)
329
330 self.left_eye = self.left_locator.mean
331 self.right_eye = self.right_locator.mean
332
333 self.perturbations=False
334
335
337 '''
338 @returns: a list of tuples where each tuple contains (registered_image, detection_rect, left_eye, right_eye)
339 '''
340 result = []
341
342 rects = self.face_detector.detect(im)
343
344
345 for rect in rects:
346
347
348 laffine,raffine = self.generateTransforms(rect)
349 lcropped = laffine.transformImage(im)
350 rcropped = raffine.transformImage(im)
351
352
353 lcropped = pv.meanStd(lcropped)
354 rcropped = pv.meanStd(rcropped)
355
356 pleye = self.left_locator.predict(lcropped)
357 preye = self.right_locator.predict(rcropped)
358
359 pleye = laffine.invertPoint(pleye)
360 preye = raffine.invertPoint(preye)
361
362
363 affine = pv.AffineFromPoints(pleye,preye,self.left_eye,self.right_eye,self.tile_size)
364 reg = affine.transformImage(im)
365
366 if self.validate != None and not self.validate(reg):
367
368
369 if self.annotate:
370 im.annotateRect(rect,color='red')
371 im.annotatePoint(pleye,color='red')
372 im.annotatePoint(preye,color='red')
373 continue
374
375 if self.annotate:
376 reg.annotatePoint(self.left_eye,color='green')
377 reg.annotatePoint(self.right_eye,color='green')
378 im.annotatePoint(pleye,color='green')
379 im.annotatePoint(preye,color='green')
380 im.annotateRect(rect,color='green')
381 result.append((reg,rect,pleye,preye))
382
383 return result
384
385
387 '''
388 This class detects faces and then returns the detection rectangles and
389 the eye coordinates.
390 '''
395
396
398 '''
399 This class detects faces and then returns the detection rectangles and
400 the eye coordinates.
401 '''
403 ''' Create two point locators that use the methods of interest '''
404 self.left_locator = KRRLocator(**kwargs)
405 self.right_locator = KRRLocator(**kwargs)
406
407
408
409
def SVMEyeDetectorFromDatabase(eyes_file, image_dir, image_ext='.jpg',
                               training_set=None, training_size=None, **kwargs):
    '''
    Train a face finder using an Eye Coordinates file and a face dataset.

    @param eyes_file: an EyesFile mapping image names to eye coordinates.
    @param image_dir: directory containing the training images.
    @param image_ext: file extension appended to each image name.
    @param training_set: image names to train on; defaults to all in eyes_file.
    @param training_size: cap on the number of training images.
    @param kwargs: forwarded to the SVMEyeDetector constructor.
    @returns: the trained SVMEyeDetector.
    '''
    # NOTE(review): the original def line was lost in this listing; the
    # signature (and the default for image_ext) is reconstructed from the
    # call site, which passes image_ext/face_detector/random_seed by
    # keyword -- confirm against the project history.
    face_finder = SVMEyeDetector(**kwargs)

    if training_set is None:
        training_set = eyes_file.files()

    if training_size is None or training_size > len(training_set):
        training_size = len(training_set)

    for filename in training_set[:training_size]:
        im_name = join(image_dir, filename + image_ext)
        im = pv.Image(im_name)

        # An image may contain several annotated faces.
        eyes = eyes_file.getEyes(filename)
        for left, right in eyes:
            face_finder.addTraining(left, right, im)

    face_finder.train()
    return face_finder
437
438
439
440
441
444 self.images = []
445 self.names = []
446
447 SCRAPS_FACE_DATA = os.path.join(pv.__path__[0],"data","csuScrapShots")
448
449
450 self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
451 for filename in self.eyes.files():
452 img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
453 self.images.append(img)
454 self.names.append(filename)
455
456 self.assert_( len(self.images) == 173 )
457
459 '''
460 This trains the FaceFinder on the scraps database.
461 '''
462
463
464
465 eyes_filename = join(pv.__path__[0],'data','csuScrapShots','coords.txt')
466
467 eyes_file = EyesFile(eyes_filename)
468
469
470 cascade_file = join(pv.__path__[0],'config','facedetector_celebdb2.xml')
471
472 face_detector = CascadeDetector(cascade_file)
473
474 image_dir = join(pv.__path__[0],'data','csuScrapShots')
475
476 ed = SVMEyeDetectorFromDatabase(eyes_file, image_dir, image_ext=".pgm", face_detector=face_detector,random_seed=0)
477 edt = EyeDetectionTest(name='scraps')
478
479
480 for img in self.images:
481
482 faces = ed.detect(img)
483
484
485 pred_eyes = []
486 for _,_,pleye,preye in faces:
487
488 pred_eyes.append((pleye,preye))
489
490 truth_eyes = self.eyes.getEyes(img.filename)
491 edt.addSample(truth_eyes, pred_eyes, im=img, annotate=False)
492
493
494
495
496
497
498
499
500