
Source Code for Module pyvision.types.Perspective

# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

##
# This module performs a perspective transform on an image.  Primarily
# implemented using OpenCV: cvWarpPerspective
##
import unittest
import os.path
#import math

import cv
import numpy as np
import numpy.linalg as la
import pyvision as pv

def logPolar(im, center=None, radius=None, M=None, size=(64,128)):
    '''
    Produce a log polar transform of the image.  See OpenCV for details.
    The scale space is calculated based on radius or M.  If both are given
    M takes priority.
    '''
    #M=1.0
    w,h = im.size
    if radius is None:
        radius = 0.5*min(w,h)

    if M is None:
        #rho=M*log(sqrt(x^2+y^2))
        #size[0] = M*log(r)
        M = size[0]/np.log(radius)

    if center is None:
        center = pv.Point(0.5*w,0.5*h)
    src = im.asOpenCV()
    # allocate an 8-bit, 3-channel destination image and resample into it
    dst = cv.CreateImage((size[0],size[1]), cv.IPL_DEPTH_8U, 3)
    cv.LogPolar(src, dst, center.asOpenCV(), M, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS)
    return pv.Image(dst)
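##
# Usage sketch (illustrative only -- "face.jpg" below is a placeholder file
# name, not part of the PyVision test data).  The log polar transform resamples
# the image so that rotation and scaling about the center become translations
# along the output axes.
def _logPolarExample(filename="face.jpg"):
    ''' Illustrative sketch: view the log polar transform of an image. '''
    im = pv.Image(filename)
    lp = logPolar(im, size=(64,128))   # default center and radius are derived from the image size
    return lp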

##
# Create a transform or homography that optimally maps one set of points to
# the other set of points.  Requires at least four points.
#
#   [ A B C ]   [ x1 ]   [ x2 ]
#   [ D E F ] * [ y1 ] = [ y2 ]
#   [ G H 1 ]   [ w1 ]   [ w2 ]
#
def PerspectiveFromPointsOld(source, dest, new_size):
    '''
    Python/SciPy implementation which finds a perspective transform
    between points.

    Most users should use PerspectiveFromPoints instead.  This method
    may be eliminated in the future.
    '''
    assert len(source) == len(dest)

    src_nrm = pv.AffineNormalizePoints(source)
    source = src_nrm.transformPoints(source)
    dst_nrm = pv.AffineNormalizePoints(dest)
    dest = dst_nrm.transformPoints(dest)

    A = []
    for i in range(len(source)):
        src = source[i]
        dst = dest[i]

        # See Hartley and Zisserman Ch. 4.1, 4.1.1, 4.4.4
        row1 = [0.0,0.0,0.0,-dst.w*src.x,-dst.w*src.y,-dst.w*src.w,dst.y*src.x,dst.y*src.y,dst.y*src.w]
        row2 = [dst.w*src.x,dst.w*src.y,dst.w*src.w,0.0,0.0,0.0,-dst.x*src.x,-dst.x*src.y,-dst.x*src.w]
        #row3 = [-dst.y*src.x,-dst.y*src.y,-dst.y*src.w,dst.x*src.x,dst.x*src.y,dst.x*src.w,0.0,0.0,0.0]
        A.append(row1)
        A.append(row2)
        #A.append(row3)
    A = np.array(A)
    U,D,Vt = la.svd(A)
    H = Vt[8,:].reshape(3,3)

    matrix = np.dot(dst_nrm.inverse, np.dot(H, src_nrm.matrix))

    return PerspectiveTransform(matrix, new_size)
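##
# Sketch of what an estimated 3x3 homography does to a single point (NumPy
# only; the matrix entries and the point below are made-up values chosen purely
# to illustrate the [x,y,w] -> [x',y',w'] mapping and the divide-by-w step):
def _applyHomographyExample():
    ''' Illustrative sketch: map one homogeneous point through a homography. '''
    H = np.array([[1.2, 0.1, -30.0],
                  [0.0, 1.1,  10.0],
                  [0.0, 0.0,   1.0]])
    x1 = np.array([100.0, 50.0, 1.0])   # homogeneous source point [x1, y1, w1]
    x2 = np.dot(H, x1)                  # homogeneous destination point [x2, y2, w2]
    return x2[:2]/x2[2]                 # divide by w2 to recover pixel coordinates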
def PerspectiveFromPoints(source, dest, new_size, method=0, ransacReprojThreshold=1.5):
    '''
    Calls the OpenCV function: cvFindHomography.  This method has
    additional options to use the CV_RANSAC or CV_LMEDS methods to
    find a robust homography.  method=0 gives results similar to
    PerspectiveFromPointsOld.
    '''
    assert len(source) == len(dest)

    n_points = len(source)

    s = cv.CreateMat(n_points,2,cv.CV_32F)
    d = cv.CreateMat(n_points,2,cv.CV_32F)
    p = cv.CreateMat(3,3,cv.CV_32F)

    for i in range(n_points):
        s[i,0] = source[i].X()
        s[i,1] = source[i].Y()

        d[i,0] = dest[i].X()
        d[i,1] = dest[i].Y()

    # the homography is written into p
    cv.FindHomography(s,d,p,method,ransacReprojThreshold)

    matrix = pv.OpenCVToNumpy(p)

    return PerspectiveTransform(matrix, new_size)
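##
# Usage sketch for the robust estimation options (illustrative only:
# source_points and dest_points are assumed to be matching lists of pv.Point
# correspondences, as required by PerspectiveFromPoints; CV_RANSAC is the
# OpenCV constant named in the docstring above):
def _robustHomographyExample(source_points, dest_points):
    ''' Illustrative sketch: estimate a homography with RANSAC and warp to 640x480. '''
    persp = PerspectiveFromPoints(source_points, dest_points, (640,480),
                                  method=cv.CV_RANSAC, ransacReprojThreshold=3.0)
    return persp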


##
# The PerspectiveTransform class is used to transform images and points back
# and forth between different coordinate systems.
#
# @param matrix a 3-by-3 matrix that defines the transformation.
# @param new_size the size of any new images created by this perspective transform.
# @keyparam interpolate the image filtering function used for interpolating between pixels.
# @return a PerspectiveTransform object
class PerspectiveTransform:

    def __init__(self, matrix, new_size, interpolate=None):
        self.matrix = matrix
        self.inverse = la.inv(matrix)
        self.size = new_size
        self.interpolate = interpolate


    def __call__(self, data):
        '''
        This is a simple interface to transform images or points.  Simply
        call the perspective transform like a function and it will try to
        automatically transform the argument.

        @param data: an image, point, or list of points.
        '''
        if isinstance(data, pv.Image):
            return self.transformImage(data)
        elif isinstance(data, pv.Point):
            return self.transformPoint(data)
        else: # assume this is a list of points
            return self.transformPoints(data)


    ##
    # Transforms an image into the new coordinate system.
    #
    # @param im a pv.Image object
    def transformImage(self, im):
        ''' Transform an image. '''
        if isinstance(self.matrix, cv.cvmat):
            matrix = self.matrix
        else:
            matrix = pv.NumpyToOpenCV(self.matrix)
        src = im.asOpenCV()
        dst = cv.CreateImage((self.size[0], self.size[1]), cv.IPL_DEPTH_8U, src.nChannels)
        cv.WarpPerspective(src, dst, matrix)
        return pv.Image(dst)


    ##
    # Transforms a pv.Point into the new coordinate system.
    #
    # @param pt a pv.Point
    def transformPoint(self, pt):
        ''' Transform a point from the old image to the new image. '''
        vec = np.dot(self.matrix, pt.asVector2H())
        return pv.Point(x=vec[0,0], y=vec[1,0], w=vec[2,0])

    ##
    # Transforms a list of pv.Points into the new coordinate system.
    #
    # @param pts a list of pv.Points
    def transformPoints(self, pts):
        ''' Transform a list of points from the old image to the new image. '''
        if len(pts) == 0:
            return []
        elif isinstance(pts[0], pv.Point):
            # Transform pyvision points
            return [ self.transformPoint(pt) for pt in pts ]
        else:
            # Transform a numpy array of points
            pts = np.array(pts.T, dtype=np.float64)
            r,c = pts.shape
            pad = np.ones((1,c), dtype=np.float64)
            pts = np.concatenate((pts, pad), axis=0)
            #print pts
            pts = np.dot(self.matrix, pts)
            pts /= pts[2:3,:]
            #print pts
            return pts[:2,:].T

    ##
    # Transforms a pv.Point from the new coordinate system back to
    # the old coordinate system.
    #
    # @param pt a pv.Point
    def invertPoint(self, pt):
        ''' Transform a point from the new image back to the old image. '''
        vec = np.dot(self.inverse, pt.asVector2H())
        return pv.Point(x=vec[0,0], y=vec[1,0], w=vec[2,0])

    ##
    # Transforms a list of pv.Points from the new coordinate system back to
    # the old coordinate system.
    #
    # @param pts a list of pv.Points
    def invertPoints(self, pts):
        ''' Transform a list of points from the new image back to the old image. '''
        return [ self.invertPoint(pt) for pt in pts ]

    ##
    # @return the transform as a 3 by 3 matrix
    def asMatrix(self):
        ''' Return the transform as a 3 by 3 matrix. '''
        return self.matrix

    ##
    # Used to concatenate transforms.  For example:
    # <pre>
    # # This code first scales and then translates
    # S = AffineScale(2.0)
    # T = AffineTranslate(4,5)
    # A = T*S
    # new_im = A.transformImage(old_im)
    # </pre>
    # @return a single PerspectiveTransform which is the same as applying
    # both transforms in sequence.
    def __mul__(self, affine):
        return PerspectiveTransform(np.dot(self.matrix, affine.matrix), self.size, self.interpolate)
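##
# Usage sketch for PerspectiveTransform (illustrative only: the matrix entries
# below are made-up values chosen to show the call pattern, and im is assumed
# to be a pv.Image):
def _perspectiveTransformExample(im):
    ''' Illustrative sketch: build a transform from a matrix and apply it. '''
    matrix = np.array([[1.0, 0.2,    0.0],
                       [0.0, 1.0,    0.0],
                       [0.0, 0.0001, 1.0]])
    persp  = PerspectiveTransform(matrix, (640,480))
    warped = persp(im)                  # __call__ dispatches to transformImage for images
    pt     = persp(pv.Point(320,240))   # ... and to transformPoint for points
    back   = persp.invertPoint(pt)      # map the point back into the source image
    return warped, pt, back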

# TODO: Add unit tests
class _PerspectiveTest(unittest.TestCase):

    def setUp(self):
        fname_a = os.path.join(pv.__path__[0],'data','test','perspective1a.jpg')
        fname_b = os.path.join(pv.__path__[0],'data','test','perspective1b.jpg')

        self.im_a = pv.Image(fname_a)
        self.im_b = pv.Image(fname_b)

        # corners clockwise: upper left, upper right, lower right, lower left
        self.corners_a = (pv.Point(241,136),pv.Point(496,140),pv.Point(512,343),pv.Point(261,395))
        self.corners_b = (pv.Point(237,165),pv.Point(488,177),pv.Point(468,392),pv.Point(222,347))
        self.corners_t = (pv.Point(0,0),pv.Point(639,0),pv.Point(639,479),pv.Point(0,479))

        for pt in self.corners_a:
            self.im_a.annotatePoint(pt)

        #self.im_a.show()
        #self.im_b.show()

    def test_four_points_a(self):
        p = PerspectiveFromPoints(self.corners_a, self.corners_t, (640,480))
        _ = p.transformPoints(self.corners_a)
        #for pt in pts:
        #    print "Point: %7.2f %7.2f"%(pt.X(), pt.Y())

        _ = p.transformImage(self.im_a)
        #im.show()

    def test_four_points_b(self):
        p = PerspectiveFromPoints(self.corners_b, self.corners_t, (640,480))
        _ = p.transformPoints(self.corners_b)
        #for pt in pts:
        #    print "Point: %7.2f %7.2f"%(pt.X(), pt.Y())

        _ = p.transformImage(self.im_b)
        #im.show()

    def test_four_points_ab(self):
        p = PerspectiveFromPoints(self.corners_a, self.corners_b, (640,480))
        #pts = p.transformPoints(self.corners_b)
        #for pt in pts:
        #    print "Point: %7.2f %7.2f"%(pt.X(), pt.Y())

        _ = p.transformImage(self.im_a)
        #im.show()
        #self.im_b.show()
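##
# Convenience entry point (a sketch; the original module relies on an
# external test runner to discover _PerspectiveTest):
if __name__ == '__main__':
    unittest.main()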