Package pyvision :: Package types :: Module Affine
[hide private]
[frames] | no frames]

Source Code for Module pyvision.types.Affine

  1  # PyVision License 
  2  # 
  3  # Copyright (c) 2006-2008 David S. Bolme 
  4  # All rights reserved. 
  5  # 
  6  # Redistribution and use in source and binary forms, with or without 
  7  # modification, are permitted provided that the following conditions 
  8  # are met: 
  9  #  
 10  # 1. Redistributions of source code must retain the above copyright 
 11  # notice, this list of conditions and the following disclaimer. 
 12  #  
 13  # 2. Redistributions in binary form must reproduce the above copyright 
 14  # notice, this list of conditions and the following disclaimer in the 
 15  # documentation and/or other materials provided with the distribution. 
 16  #  
 17  # 3. Neither name of copyright holders nor the names of its contributors 
 18  # may be used to endorse or promote products derived from this software 
 19  # without specific prior written permission. 
 20  #  
 21  #  
 22  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
 23  # ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
 24  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
 25  # A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR 
 26  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
 27  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
 28  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 29  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
 30  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
 31  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
 32  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 33  ''' 
 34  This module contains the AffineTransform class and a set of factory  
 35  functions used to create AffineTransform instances given different  
 36  sets of parameters.  Most factory functions require information that  
 37  specifies the transformation and a size for the output image. 
 38  ''' 
 39   
 40   
 41   
 42  import unittest 
 43  import os.path 
 44  import math 
 45  import copy 
 46  import weakref 
 47   
 48  try: 
 49      from PIL.Image import AFFINE,NEAREST,BILINEAR,BICUBIC,ANTIALIAS #@UnusedImport 
 50  except: 
 51      from Image import AFFINE,NEAREST,BILINEAR,BICUBIC,ANTIALIAS #@UnusedImport @Reimport 
 52       
 53  from numpy import array,dot,sqrt 
 54  from numpy.linalg import inv,solve,lstsq 
 55  from scipy.ndimage import affine_transform 
 56  import random 
 57   
 58  import pyvision 
 59  import pyvision as pv 
 60  import numpy as np 
 61  import cv 
 62  import cv2 
 63   
 64  from pyvision.types.img import Image, TYPE_PIL, TYPE_MATRIX_2D, TYPE_MATRIX_RGB, TYPE_OPENCV, TYPE_OPENCV2, TYPE_OPENCV2BW 
 65  from pyvision.types.Point import Point 
 66  from pyvision.types.Rect import Rect 
 67  from pyvision.vector.RANSAC import RANSAC,LMeDs 
 68   
 69   
def AffineNormalizePoints(points_b):
    '''
    Create a transform that centers a set of points_b such that their mean is (0,0)
    and then scales them such that their average distance from (0,0) is 1.0.
    
    @param points_b: list of link.Point to normalize
    @returns: an AffineTransform object
    '''
    # Compute the centroid of the points.  len() replaces the original
    # hand-rolled counting loop; transformPoints always returns a list.
    count = len(points_b)
    mean = Point(0,0)
    for point in points_b:
        mean += point
    mean = (1.0/count)*mean
    
    # Translate the points so they are mean centered.
    center = AffineTranslate(-mean.X(),-mean.Y(),(0,0))
    points_b = center.transformPoints(points_b)
    
    # Compute the mean distance from the origin.
    mean_dist = sum(math.hypot(point.X(),point.Y()) for point in points_b)/count
    
    # Rescale so the average distance from the origin is 1.0.
    scale = AffineScale(1.0/mean_dist,(0,0))
    points_b = scale.transformPoints(points_b)
    
    # Compose: first center, then scale.
    return scale*center
108 109 110
def AffineTranslate(dx,dy,new_size,interpolate=BILINEAR):
    '''
    Create a simple translation transform.
    
    @param dx: translation in the x direction
    @param dy: translation in the y direction
    @param new_size: new size for the image
    @param interpolate: PIL interpolate to use
    '''
    # Homogeneous 3x3 matrix: identity with the offsets in the last column.
    mat = array([[1.0, 0.0, dx],
                 [0.0, 1.0, dy],
                 [0.0, 0.0, 1.0]],'d')
    return AffineTransform(mat,new_size,interpolate)
123 124
def AffineScale(scale,new_size,center=None,interpolate=BILINEAR):
    '''
    Create a simple scale transform.
    
    @param scale: the amount to scale the image.
    @param new_size: new size for the image.
    @param center: an optional link.Point to scale about; None scales about the origin.
    @param interpolate: PIL interpolate to use.
    '''
    matrix = array([[scale,0,0],[0,scale,0],[0,0,1]],'d')
    
    # Use a distinct name for the transform so the 'scale' parameter is
    # not shadowed (the original rebound it, which hurt readability).
    scale_xform = AffineTransform(matrix,new_size,interpolate)
    if center is None:  # identity test, not '==': avoids custom __eq__ surprises
        return scale_xform
    # Conjugate by translations so the scaling is centered on 'center'.
    return AffineTranslate(center.X(),center.Y(),new_size)*scale_xform*AffineTranslate(-center.X(),-center.Y(),new_size)
140 141
def AffineNonUniformScale(sx,sy,new_size,interpolate=BILINEAR):
    '''
    Create a scale transform with different values for the x and y directions.
    
    @param sx: scale in the x direction.
    @param sy: scale in the y direction.
    @param new_size: new size for the image.
    @param interpolate: PIL interpolate to use.
    '''
    # Diagonal homogeneous matrix: x and y scaled independently.
    mat = array([[sx,  0.0, 0.0],
                 [0.0, sy,  0.0],
                 [0.0, 0.0, 1.0]],'d')
    return AffineTransform(mat,new_size,interpolate)
154 155
def AffineRotate(theta,new_size,center=None,interpolate=BILINEAR):
    '''
    Create a rotation about the origin, or about an optional center point.
    
    @param theta: the angle to rotate the image in radians.
    @param new_size: new size for the image.
    @param center: an optional link.Point to rotate about; None rotates about the origin.
    @param interpolate: PIL interpolate to use.
    '''
    # Hoist the trig calls so each is evaluated once instead of twice.
    c = math.cos(theta)
    s = math.sin(theta)
    matrix = array([[c,-s,0],[s,c,0],[0,0,1]],'d')
    
    rotate = AffineTransform(matrix,new_size,interpolate)
    if center is None:  # identity test, not '==': avoids custom __eq__ surprises
        return rotate
    # Conjugate by translations so the rotation is centered on 'center'.
    return AffineTranslate(center.X(),center.Y(),new_size)*rotate*AffineTranslate(-center.X(),-center.Y(),new_size)
171
def AffineFromRect(rect,new_size,interpolate=BILINEAR):
    '''
    Create a transform from a source rectangle to a new image. This basically
    crops a rectangle out of the image and rescales it to the new size.
    
    @param rect: the source link.Rect.
    @param new_size: new size for the image.
    @param interpolate: PIL interpolate to use.
    '''
    w,h = new_size
    
    # Scale factors that stretch the rectangle onto the output size.
    sx = float(w)/rect.w
    sy = float(h)/rect.h
    # Post-scale translation that moves the rect corner to the origin.
    tx = -rect.x*sx
    ty = -rect.y*sy
    
    matrix = array([[sx, 0,  tx],
                    [0,  sy, ty],
                    [0,  0,  1]],'d')
    return AffineTransform(matrix,new_size,interpolate)
190 191
def AffineFromTile(center,new_size,interpolate=BILINEAR):
    '''
    Extract an image tile centered on a point.
    
    @param center: the center link.Point of the tile.
    @param new_size: new size for the image.
    @param interpolate: PIL interpolate to use.
    '''
    w,h = new_size
    # Rect of the requested size positioned so 'center' is its middle.
    # NOTE(review): w/2 and h/2 floor under Python 2 integer division.
    rect = Rect(center.X()-w/2,center.Y()-h/2,w,h)
    
    # Same crop-and-rescale mapping as AffineFromRect.
    sx = float(w)/rect.w
    sy = float(h)/rect.h
    tx = -rect.x*sx
    ty = -rect.y*sy
    
    matrix = array([[sx, 0,  tx],
                    [0,  sy, ty],
                    [0,  0,  1]],'d')
    return AffineTransform(matrix,new_size,interpolate)
210 211
def AffineFromPoints(src1,src2,dst1,dst2,new_size,interpolate=BILINEAR):
    '''
    An affine transform that will rotate, translate, and scale to map one
    set of points_b to the other. For example, to align eye coordinates in face images.
    
    Find a transform (a,b,tx,ty) such that it maps the source points_b to the
    destination points_b::
    
        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2
    
    The two correspondences create a set of four linear equations with four
    unknowns, which is solved exactly for the transform.
    
    @param src1: the first link.Point in the source image.
    @param src2: the second link.Point in the source image.
    @param dst1: the first link.Point in the destination image.
    @param dst2: the second link.Point in the destination image.
    @param new_size: new size for the image.
    @param interpolate: PIL interpolate to use.
    '''
    # Build the 4x4 system: two rows per source point...
    rows = []
    for pt in (src1,src2):
        rows.append([pt.X(),-pt.Y(),1,0])
        rows.append([pt.Y(), pt.X(),0,1])
    # ...and two right-hand-side entries per destination point.
    rhs = []
    for pt in (dst1,dst2):
        rhs.append(pt.X())
        rhs.append(pt.Y())
    
    a,b,tx,ty = solve(array(rows),array(rhs))
    
    # Assemble the homogeneous similarity matrix.
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')
    return AffineTransform(matrix,new_size,interpolate)
249 250 251 252
def AffineFromPointsLS(src,dst,new_size,interpolate=BILINEAR, normalize=True):
    '''
    An affine transform that will rotate, translate, and scale to map one
    set of points_b to the other. For example, to align eye coordinates in face images.
    
    Find a transform (a,b,tx,ty) such that it maps the source points_b to the
    destination points_b::
    
        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2
    
    A least squares solution minimizes the squared error of the fit over all
    of the point correspondences.
    
    @param src: a list of link.Points in the source image.
    @param dst: a list of link.Points in the destination image.
    @param new_size: new size for the image.
    @param interpolate: PIL interpolate to use.
    @param normalize: condition the points before solving for numerical stability.
    '''
    if normalize:
        # Condition both point sets: zero mean, unit mean distance.
        src_norm = AffineNormalizePoints(src)
        src = src_norm.transformPoints(src)
        dst_norm = AffineNormalizePoints(dst)
        dst = dst_norm.transformPoints(dst)
    
    # Two linear equations per point correspondence.
    A = []
    b = []
    for i,pt in enumerate(src):
        A.append([pt.X(),-pt.Y(),1,0])
        A.append([pt.Y(), pt.X(),0,1])
        b.append(dst[i].X())
        b.append(dst[i].Y())
    
    result,_,_,_ = lstsq(array(A,dtype=np.float64),array(b,dtype=np.float64))
    a,b,tx,ty = result
    
    # Assemble the homogeneous similarity matrix.
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')
    
    if normalize:
        # Undo the conditioning: dst_norm^-1 * M * src_norm.
        matrix = dot(dst_norm.inverse,dot(matrix,src_norm.matrix))
    
    return AffineTransform(matrix,new_size,interpolate)
def AffineFromPointsRANSAC(src,dst,new_size,interpolate=BILINEAR, normalize=True,tol=0.15):
    '''
    An affine transform that will rotate, translate, and scale to map one
    set of points_b to the other. For example, to align eye coordinates in face images.
    
    Find a transform (a,b,tx,ty) such that it maps the source points_b to the
    destination points_b::
    
        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2
    
    Instead of a least squares solver the RANSAC solver is used to produce a
    transformation that is robust to outliers.
    
    @param src: a list of link.Points in the source image.
    @param dst: a list of link.Points in the destination image.
    @param new_size: new size for the image.
    @param interpolate: PIL interpolate to use.
    @param normalize: condition the points before solving for numerical stability.
    @param tol: inlier tolerance forwarded to the RANSAC solver.
    '''
    if normalize:
        # Condition both point sets: zero mean, unit mean distance.
        src_norm = AffineNormalizePoints(src)
        src = src_norm.transformPoints(src)
        dst_norm = AffineNormalizePoints(dst)
        dst = dst_norm.transformPoints(dst)
    
    # Two linear equations per point correspondence.
    A = []
    b = []
    for i,pt in enumerate(src):
        A.append([pt.X(),-pt.Y(),1,0])
        A.append([pt.Y(), pt.X(),0,1])
        b.append(dst[i].X())
        b.append(dst[i].Y())
    
    # group=2 keeps the paired x/y rows of each point together.
    result = RANSAC(array(A),array(b),tol=tol,group=2)
    a,b,tx,ty = result
    
    # Assemble the homogeneous similarity matrix.
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')
    
    if normalize:
        # Undo the conditioning: dst_norm^-1 * M * src_norm.
        matrix = dot(dst_norm.inverse,dot(matrix,src_norm.matrix))
    
    return AffineTransform(matrix,new_size,interpolate)
354 355
def AffineFromPointsLMeDs(src,dst,new_size,interpolate=BILINEAR, normalize=True):
    '''
    An affine transform that will rotate, translate, and scale to map one
    set of points_b to the other. For example, to align eye coordinates in face images.
    
    Find a transform (a,b,tx,ty) such that it maps the source points_b to the
    destination points_b::
    
        a*x1-b*y1+tx = x2
        b*x1+a*y1+ty = y2
    
    Instead of a least squares solver the LMeDs (least median of squares)
    solver is used to produce a transformation that is robust to outliers.
    
    @param src: a list of link.Points in the source image.
    @param dst: a list of link.Points in the destination image.
    @param new_size: new size for the image.
    @param interpolate: PIL interpolate to use.
    @param normalize: condition the points before solving for numerical stability.
    '''
    if normalize:
        # Condition both point sets: zero mean, unit mean distance.
        src_norm = AffineNormalizePoints(src)
        src = src_norm.transformPoints(src)
        dst_norm = AffineNormalizePoints(dst)
        dst = dst_norm.transformPoints(dst)
    
    # Two linear equations per point correspondence.
    A = []
    b = []
    for i,pt in enumerate(src):
        A.append([pt.X(),-pt.Y(),1,0])
        A.append([pt.Y(), pt.X(),0,1])
        b.append(dst[i].X())
        b.append(dst[i].Y())
    
    result = LMeDs(array(A),array(b))
    a,b,tx,ty = result
    
    # Assemble the homogeneous similarity matrix.
    matrix = array([[a,-b,tx],[b,a,ty],[0,0,1]],'d')
    
    if normalize:
        # Undo the conditioning: dst_norm^-1 * M * src_norm.
        matrix = dot(dst_norm.inverse,dot(matrix,src_norm.matrix))
    
    return AffineTransform(matrix,new_size,interpolate)
407 408
def AffinePerturb(Dscale, Drotate, Dtranslate, new_size, mirror=False, flip=False, rng = None):
    '''
    Generates an link.AffineTrasform that slightly perturbs the image. Primarily
    to generate more training images.
    
    The perturbations include small scale, rotation, and translations. The
    transform can also mirror the image in the left/right direction or flip the
    top and bottom as other ways to generate synthetic training images.
    
    @param Dscale: the difference in scale [1.0+Dscale, 1.0-Dscale].
    @param Drotate: the range of difference in rotation [-Drotate,+Drotate] .
    @param Dtranslate: the range of difference in translation [-Dtranslate,+Dtranslate] .
    @param new_size: new size for the image.
    @param mirror: Include mirror perturbations.
    @param flip: Include flipped perturbations
    @param rng: optional random number generator; defaults to the random module.
    '''
    tile_size = new_size
    w,h = tile_size
    if rng is None:  # identity test, not '==': avoids custom __eq__ surprises
        rng = random
    
    # Sample the perturbation parameters (same RNG call order as before).
    tx = rng.uniform(-Dtranslate,Dtranslate)
    ty = rng.uniform(-Dtranslate,Dtranslate)
    sx = rng.choice([-1.,1.]) if mirror else 1.0
    sy = rng.choice([-1.,1.]) if flip else 1.0
    s = rng.uniform(1-Dscale,1+Dscale)
    r = rng.uniform(-Drotate,Drotate)
    
    # Move the tile center to the origin, apply the perturbations, move back.
    there = AffineTranslate(-w/2,-h/2,tile_size)
    flipflop = AffineNonUniformScale(sx,sy,tile_size)
    scale = AffineScale(s,tile_size)
    rotate = AffineRotate(r,tile_size)
    translate = AffineTranslate(tx,ty,tile_size)
    back = AffineTranslate(w/2,h/2,tile_size)
    
    return back*translate*rotate*scale*flipflop*there
452 453
class AffineTransform:
    '''
    The AffineTransform class is used to transform images and points_b back and
    and forth between different coordinate systems.
    
    Instances hold a 3x3 homogeneous transform matrix, its inverse, an output
    size, and a PIL interpolation mode.
    '''
    
    def __init__(self,matrix,new_size,interpolate=BILINEAR):
        '''
        Constructor for the AffineTransform.  See also the affine transform factories.
        
        @param matrix: a 3-by-3 matrix that defines the transformation.
        @param new_size: the size of any new images created by this affine transform.
        @param interpolate: the image filtering function used for interpolating between pixels.
        @returns: an AffineTransform object
        '''
        self.matrix = matrix
        # Cache the inverse once; raises LinAlgError for a singular matrix.
        self.inverse = inv(matrix)
        self.size = int(new_size[0]),int(new_size[1])
        self.interpolate = interpolate
    
    def __call__(self,data):
        '''
        This is a simple interface to transform images or points_b.  Simply
        call the affine transform like a function and it will try to automatically
        transform the argument.
        
        @param data: an image, point, or list of points_b.
        '''
        if isinstance(data,pv.Image):
            return self.transformImage(data)
        elif isinstance(data,pv.Point):
            return self.transformPoint(data)
        else: # assume this is a list of points_b
            return self.transformPoints(data)
    
    def invert(self,data):
        '''
        This is a simple interface to transform images or points_b.  Simply
        call invert with the points_b or list of points_b and it will automatically
        call the correct function.
        
        @param data: an image, point, or list of points_b.
        '''
        if isinstance(data,pv.Image):
            return self.invertImage(data)
        elif isinstance(data,pv.Point):
            return self.invertPoint(data)
        else: # assume this is a list of points_b
            return self.invertPoints(data)
    
    def invertImage(self,im_a, use_orig=True):
        '''
        Perform the inverse affine transformation on the image.
        
        @param im_a: an Image object.
        @param use_orig: passed through to transformImage.
        @returns: the image mapped back through the inverse transform.
        '''
        # Delegates to transformImage with the matrix/inverse roles swapped.
        return self.transformImage(im_a,use_orig=use_orig,inverse=True)
    
    def transformImage(self,im_a, use_orig=True, inverse=False):
        '''
        Transforms an image into the new coordinate system.
        
        If this image was produced via an affine transform of another image,
        this method will attempt to trace weak references to the original image
        and directly compute the new image from that image to improve accuracy.
        To accomplish this a weak reference to the original source image and
        the affine matrix used for the transform are added to any image
        produced by this method.  This can be disabled using the use_orig
        parameter.
        
        @param im_a: an Image object
        @param use_orig: (True or False) attempts to find and use the original image as the source to avoid an accumulation of errors.
        @param inverse: when True, apply the inverse mapping instead (used by invertImage).
        @returns: the transformed image
        '''
        #TODO: does not support opencv images. see Perspective.py
        prev_im = im_a
        
        # 'inverse' is rebound from a flag to the matrix that maps output
        # coordinates back to input coordinates (the convention PIL expects).
        if inverse:
            inverse = self.matrix
        else:
            inverse = self.inverse
        
        if use_orig:
            # Find the oldest image used to produce this one by following weak
            # references.
            
            # Check to see if there is an aff_prev list
            if hasattr(prev_im,'aff_prev'):
                
                # If there is... search that list for the oldest image
                # still alive, then accumulate every transform applied since.
                found_prev = False
                for i in range(len(prev_im.aff_prev)):
                    ref,cmat = prev_im.aff_prev[i]
                    if not found_prev and ref():
                        # First live weak reference becomes the new source.
                        im_a = ref()
                        mat = np.eye(3)
                        found_prev = True
                    
                    if found_prev:
                        # Compose the chain of inverse matrices onto mat.
                        mat = np.dot(mat,cmat)
                
                if found_prev:
                    inverse = np.dot(mat,inverse)
        
        # Dispatch on the underlying image representation.
        if im_a.getType() == TYPE_PIL:
            # PIL takes the top two rows of the inverse as a flat 6-tuple.
            data = inverse[:2,:].flatten()
            #data = (matrix[0,0],matrix[0,1],matrix[0,2],matrix[1,0],matrix[1,1],matrix[1,2])
            pil = im_a.asPIL().transform(self.size, AFFINE, data, self.interpolate)
            result = Image(pil)
        
        elif im_a.getType() == TYPE_MATRIX_2D:
            # Transform a matrix 2d
            # NOTE(review): this branch uses self.inverse directly, so the
            # accumulated 'inverse' chain above is ignored here — confirm.
            mat = im_a.asMatrix2D()
            mat = affine_transform(mat, self.inverse[:2,:2], offset=self.inverse[:2,2])
            result = Image(mat[:self.size[0],:self.size[1]])
        
        elif im_a.getType() == TYPE_MATRIX_RGB:
            # Transform a matrix 3d
            # Each color channel is warped independently with scipy.
            mat = im_a.asMatrix3D()
            c0 = mat[0,:,:]
            c1 = mat[1,:,:]
            c2 = mat[2,:,:]
            c0 = affine_transform(c0, self.inverse[:2,:2], offset=self.inverse[:2,2])
            c1 = affine_transform(c1, self.inverse[:2,:2], offset=self.inverse[:2,2])
            c2 = affine_transform(c2, self.inverse[:2,:2], offset=self.inverse[:2,2])
            mat = np.array([c0,c1,c2],dtype=np.float32)
            result = Image(mat[:,:self.size[0],:self.size[1]])
        
        elif im_a.getType() == TYPE_OPENCV:
            # Legacy OpenCV image: warp with the forward matrix; out-of-bounds
            # pixels are filled with gray (128).
            matrix = pv.NumpyToOpenCV(self.matrix)
            src = im_a.asOpenCV()
            dst = cv.CreateImage( (self.size[0],self.size[1]), cv.IPL_DEPTH_8U, src.nChannels );
            cv.WarpPerspective( src, dst, matrix, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS,cv.ScalarAll(128))
            result = pv.Image(dst)
        
        elif im_a.getType() == TYPE_OPENCV2:
            # Transform an opencv 2 image
            src = im_a.asOpenCV2()
            dst = cv2.warpPerspective(src, self.matrix, self.size)
            result = pv.Image(dst)
        
        elif im_a.getType() == TYPE_OPENCV2BW:
            # Transform a bw opencv 2 image
            src = im_a.asOpenCV2BW()
            dst = cv2.warpPerspective(src, self.matrix, self.size)
            result = pv.Image(dst)
        
        else:
            raise NotImplementedError("Unhandled image type for affine transform.")
        
        # Check to see if there is an aff_prev list for this object
        if use_orig and hasattr(prev_im,'aff_prev'):
            # Create one if not
            result.aff_prev = copy.copy(prev_im.aff_prev)
        else:
            result.aff_prev = []
        
        # Append the prev image and new transform
        result.aff_prev.append( (weakref.ref(prev_im), self.inverse) )
        
        return result
    
    def transformPoint(self,pt):
        '''
        Transform a point from the old image to the new image.
        
        @param pt: the point
        @returns: the new point
        '''
        # Multiply the homogeneous column vector by the forward matrix.
        vec = dot(self.matrix,pt.asVector2H())
        return Point(x=vec[0,0],y=vec[1,0],w=vec[2,0])
    
    def transformPoints(self,pts):
        '''
        Transform a set of points_b from the old image to the new image.
        
        @param pts: a list of points_b.
        @returns: a list of transformed points_b.
        '''
        return [ self.transformPoint(pt) for pt in pts ]
    
    def invertPoint(self,pt):
        '''
        Transforms a Point from the new coordinate system to
        the old coordinate system.
        
        @param pt: a single point
        @returns: the transformed point
        '''
        # Same as transformPoint but with the cached inverse matrix.
        vec = dot(self.inverse,pt.asVector2H())
        return Point(x=vec[0,0],y=vec[1,0],w=vec[2,0])
    
    def invertPoints(self,pts):
        '''
        Transforms a list of points from the new coordinate system to
        the old coordinate system.
        
        @param pts: a list of Points
        @returns: the transformed Points
        '''
        return [ self.invertPoint(pt) for pt in pts ]
    
    def asMatrix(self):
        '''
        @returns: the transform as a 3 by 3 matrix
        '''
        return self.matrix
    
    def __mul__(self,affine):
        '''
        Used to concatenate transforms.  For example::
        
            # This code first scales and then translates
            S = AffineScale(2.0)
            T = AffineTranslate(4,5)
            A = T*S
            new_im = A.transformImage(old_im)
        
        The result keeps the size and interpolation of the left operand.
        
        @returns: a single AffineTransform which is the the same as both affine transforms.
        '''
        return AffineTransform(dot(self.matrix,affine.matrix),self.size,self.interpolate)


# TODO: Add unit tests
class _AffineTest(unittest.TestCase):
    '''Regression tests for the AffineTransform class and its factories.'''
    
    def setUp(self):
        # Load a sample image shipped with pyvision for the transform tests.
        fname = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_13.jpg')
        self.test_image = Image(fname)
        #self.test_image.show()
    
    def test_rotation(self):
        # Rotate by ~pi/8 and check forward/inverse point mapping against
        # precomputed reference values.
        transform = AffineRotate(3.14/8,(640,480))
        _ = transform.transformImage(self.test_image)
        # im_a.show()
        
        pt = transform.transformPoint(Point(320,240))
        self.assertAlmostEqual(pt.X(),203.86594448424472)
        self.assertAlmostEqual(pt.Y(),344.14920700118842)
        
        pt = transform.invertPoint(Point(320,240))
        self.assertAlmostEqual(pt.X(),387.46570317672939)
        self.assertAlmostEqual(pt.Y(),99.349528744542198)
    
    def test_scale(self):
        # Uniform scale by 1.5 about the origin.
        transform = AffineScale(1.5,(640,480))
        _ = transform.transformImage(self.test_image)
        #im_a.show()
        
        pt = transform.transformPoint(Point(320,240))
        self.assertAlmostEqual(pt.X(),480.)
        self.assertAlmostEqual(pt.Y(),360.)
        
        pt = transform.invertPoint(Point(320,240))
        self.assertAlmostEqual(pt.X(),213.33333333333331)
        self.assertAlmostEqual(pt.Y(),160.)
    
    def test_translate(self):
        # Pure translation by (10,15).
        transform = AffineTranslate(10.,15.,(640,480))
        _ = transform.transformImage(self.test_image)
        #im_a.show()
        
        pt = transform.transformPoint(Point(320,240))
        self.assertAlmostEqual(pt.X(),330.)
        self.assertAlmostEqual(pt.Y(),255.)
        
        pt = transform.invertPoint(Point(320,240))
        self.assertAlmostEqual(pt.X(),310.)
        self.assertAlmostEqual(pt.Y(),225.)
    
    def test_from_rect(self):
        # Crop a 300x300 rect at (100,100) and rescale to 100x100.
        transform = AffineFromRect(Rect(100,100,300,300),(100,100))
        _ = transform.transformImage(self.test_image)
        #im_a.show()
        
        pt = transform.transformPoint(Point(320,240))
        self.assertAlmostEqual(pt.X(),73.333333333333329)
        self.assertAlmostEqual(pt.Y(),46.666666666666671)
        
        pt = transform.invertPoint(Point(50.,50.))
        self.assertAlmostEqual(pt.X(),250.0)
        self.assertAlmostEqual(pt.Y(),250.0)
    
    def test_from_points(self):
        # TODO: Fix this test
        pass
    
    def test_sim_least_sqr(self):
        # TODO: Fix this test
        pass
    
    def test_affine_least_sqr(self):
        # TODO: Fix this test
        pass
    
    def test_affine_mul(self):
        # TODO: FIx this test
        pass
    
    def test_affine_Matrix2D(self):
        # The same crop applied to a grayscale-matrix-backed image should
        # correlate very highly with the original-backed result.
        im = pv.Image(pv.BABOON)
        test_im = pv.Image(im.asMatrix2D())
        affine = pv.AffineFromRect(pv.CenteredRect(256,256,128,128),(64,64))
        
        # Transform the images
        im = affine(im)
        test_im = affine(test_im)
        
        # Correlate the resulting images
        vec1 = pv.unit(im.asMatrix2D().flatten())
        vec2 = pv.unit(test_im.asMatrix2D().flatten())
        score = np.dot(vec1,vec2)
        
        self.assertGreater(score, 0.998)
    
    def test_affine_OpenCV2BW(self):
        # Same correlation check with a black-and-white OpenCV2 source.
        im = pv.Image(pv.BABOON)
        test_im = pv.Image(im.asOpenCV2BW())
        affine = pv.AffineFromRect(pv.CenteredRect(256,256,128,128),(64,64))
        
        # Transform the images
        im = affine(im)
        test_im = affine(test_im)
        
        # Correlate the resulting images
        vec1 = pv.unit(im.asMatrix2D().flatten())
        vec2 = pv.unit(test_im.asMatrix2D().flatten())
        score = np.dot(vec1,vec2)
        
        self.assertGreater(score, 0.998)
    
    def test_affine_OpenCV2(self):
        # Same correlation check with a color OpenCV2 source.
        im = pv.Image(pv.BABOON)
        test_im = pv.Image(im.asOpenCV2())
        affine = pv.AffineFromRect(pv.CenteredRect(256,256,128,128),(64,64))
        
        # Transform the images
        im = affine(im)
        test_im = affine(test_im)
        
        # Correlate the resulting images
        vec1 = pv.unit(im.asMatrix3D().flatten())
        vec2 = pv.unit(test_im.asMatrix3D().flatten())
        score = np.dot(vec1,vec2)
        
        self.assertGreater(score, 0.998)
    
    def test_affine_Matrix3D(self):
        # Same correlation check with an RGB-matrix-backed source.
        im = pv.Image(pv.BABOON)
        test_im = pv.Image(im.asMatrix3D())
        affine = pv.AffineFromRect(pv.CenteredRect(256,256,128,128),(64,64))
        
        # Transform the images
        im = affine(im)
        test_im = affine(test_im)
        
        # Correlate the resulting images
        vec1 = pv.unit(im.asMatrix3D().flatten())
        vec2 = pv.unit(test_im.asMatrix3D().flatten())
        score = np.dot(vec1,vec2)
        
        self.assertGreater(score, 0.998)
    
    def test_affine_opencv(self):
        # TODO: FIx this test
        pass
    
    def test_prev_ref1(self):
        # Sanity-check weakref semantics: the reference dies only when the
        # last strong reference is deleted.
        fname = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_13.jpg')
        im_a = Image(fname)
        ref = weakref.ref(im_a)
        
        self.assertEquals(ref(), im_a)
        
        tmp = im_a
        del im_a
        
        self.assertEquals(ref(), tmp)
        
        del tmp
        
        self.assertEquals(ref(), None)
    
    def test_prev_ref2(self):
        # Down-scale then up-scale: with use_orig=True the original image is
        # recovered through the weakref chain (sharp result) until it dies.
        fname = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_13.jpg')
        im_a = Image(fname)
        #im_a.show()
        w,h = im_a.size
        
        # Try scaling down and then scaling back up
        tmp1 = AffineScale(0.1,(w/10,h/10)).transformImage(im_a)
        #tmp1.show()
        
        tmp2 = AffineScale(10.0,(w,h)).transformImage(tmp1,use_orig=False)
        tmp2.annotateLabel(pv.Point(10,10), "This image should be blurry.")
        #tmp2.show()
        
        tmp3 = AffineScale(10.0,(w,h)).transformImage(tmp1,use_orig=True)
        tmp3.annotateLabel(pv.Point(10,10), "This image should be sharp.")
        #tmp3.show()
        
        del im_a
        
        tmp4 = AffineScale(10.0,(w,h)).transformImage(tmp1,use_orig=True)
        tmp4.annotateLabel(pv.Point(10,10), "This image should be blurry.")
        #tmp4.show()
    
    def test_prev_ref3(self):
        # Chain several transforms and compare three strategies: per-step with
        # the original traced (torig), per-step without (tprev), and a single
        # accumulated transform applied once (taccu).
        fname = os.path.join(pv.__path__[0],'data','nonface','NONFACE_13.jpg')
        torig = tprev = im_a = Image(fname)
        #im_a.show()
        w,h = im_a.size
        
        # Scale
        aff = AffineScale(0.5,(w/2,h/2))
        accu = aff
        torig = aff.transformImage(torig)
        tprev = aff.transformImage(tprev,use_orig=False)
        taccu = accu.transformImage(im_a)
        
        torig.annotateLabel(pv.Point(10,10), "use_orig = True")
        tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
        taccu.annotateLabel(pv.Point(10,10), "accumulated")
        
        #torig.show()
        #tprev.show()
        #taccu.show()
        
        # Translate
        aff = AffineTranslate(20,20,(w/2,h/2))
        accu = aff*accu
        torig = aff.transformImage(torig)
        tprev = aff.transformImage(tprev,use_orig=False)
        taccu = accu.transformImage(im_a)
        
        torig.annotateLabel(pv.Point(10,10), "use_orig = True")
        tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
        taccu.annotateLabel(pv.Point(10,10), "accumulated")
        
        #torig.show()
        #tprev.show()
        #taccu.show()
        
        
        # Rotate
        aff = AffineRotate(np.pi/4,(w/2,h/2))
        accu = aff*accu
        torig = aff.transformImage(torig)
        tprev = aff.transformImage(tprev,use_orig=False)
        taccu = accu.transformImage(im_a)
        
        torig.annotateLabel(pv.Point(10,10), "use_orig = True")
        tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
        taccu.annotateLabel(pv.Point(10,10), "accumulated")
        
        #torig.show()
        #tprev.show()
        #taccu.show()
        
        
        
        # Translate
        aff = AffineTranslate(100,-10,(w/2,h/2))
        accu = aff*accu
        torig = aff.transformImage(torig)
        tprev = aff.transformImage(tprev,use_orig=False)
        taccu = accu.transformImage(im_a)
        
        torig.annotateLabel(pv.Point(10,10), "use_orig = True")
        tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
        taccu.annotateLabel(pv.Point(10,10), "accumulated")
        
        #torig.show()
        #tprev.show()
        #taccu.show()
        
        
        # Scale
        aff = AffineScale(2.0,(w,h))
        accu = aff*accu
        torig = aff.transformImage(torig)
        tprev = aff.transformImage(tprev,use_orig=False)
        taccu = accu.transformImage(im_a)
        
        torig.annotateLabel(pv.Point(10,10), "use_orig = True")
        tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
        taccu.annotateLabel(pv.Point(10,10), "accumulated")
        
        #torig.show()
        #tprev.show()
        #taccu.show()