Package pyvision :: Package surveillance :: Module MotionDetector
[hide private]
[frames] | [no frames]

Source Code for Module pyvision.surveillance.MotionDetector

  1  ''' 
  2  Created on Nov 9, 2010 
  3  @author: svohara 
  4  ''' 
  5  # PyVision License 
  6  # 
  7  # Copyright (c) 2006-2008 Stephen O'Hara 
  8  # All rights reserved. 
  9  # 
 10  # Redistribution and use in source and binary forms, with or without 
 11  # modification, are permitted provided that the following conditions 
 12  # are met: 
 13  #  
 14  # 1. Redistributions of source code must retain the above copyright 
 15  # notice, this list of conditions and the following disclaimer. 
 16  #  
 17  # 2. Redistributions in binary form must reproduce the above copyright 
 18  # notice, this list of conditions and the following disclaimer in the 
 19  # documentation and/or other materials provided with the distribution. 
 20  #  
 21  # 3. Neither name of copyright holders nor the names of its contributors 
 22  # may be used to endorse or promote products derived from this software 
 23  # without specific prior written permission. 
 24  #  
 25  #  
 26  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
 27  # ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
 28  # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
 29  # A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR 
 30  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
 31  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
 32  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 33  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
 34  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
 35  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
 36  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 37   
 38  import pyvision as pv 
 39  from pyvision.surveillance.BackgroundSubtraction import BG_SUBTRACT_AMF, BG_SUBTRACT_FD, BG_SUBTRACT_MCFD, BG_SUBTRACT_MF 
 40  import cv 
 41  import numpy as np 
 42   
 43  #TODO: Consider Renaming the following constants with a class-specific prefix 
 44  # so that they are easier to find in code completion. Recommend 
 45  # MD_BOUNDING_RECTS AND MD_STANDARDIZED_RECTS. Change will break existing code. 
# Output-format selectors for MotionDetector's rect_type option: either the raw
# bounding rectangles of the detected contours, or rectangles standardized
# about each contour's center of mass (+- n-sigma deviations in x and y).
BOUNDING_RECTS     = "BOUNDING_RECTS"
STANDARDIZED_RECTS = "STANDARDIZED_RECTS"
 48   
 49   
class MotionDetector(object):
    '''
    Uses background subtraction from an image buffer to detect
    areas of motion in a video.

    The general process is to update the image buffer with each new frame and
    then call the MotionDetector's detect() method; afterwards the various
    get*() accessors (getRects(), getForegroundMask(), ...) report the results.
    '''
59 - def __init__(self, imageBuff=None, thresh=20, method=BG_SUBTRACT_AMF, minArea=400, 60 rectFilter=None, buffSize=5, soft_thresh = False,rect_type=BOUNDING_RECTS,rect_sigma=2.0, 61 smooth=False):
62 ''' 63 Constructor 64 @param imageBuff: a pv.ImageBuffer object to be used in the background subtraction 65 step of the motion detection. If None, then this object will create an empty 66 5-frame buffer, and until the buffer is full, the results of the motion detection 67 will be nothing. 68 @param thresh: Used by the background subtraction to eliminate noise. 69 @param method: Select background subtraction method. See constants defined in 70 BackgroundSubtraction module 71 @param minArea: minimum foreground contour area required for detection 72 @param rectFilter: a function reference that takes a list of rectangles and 73 returns a list filtered in some way. This allows the user to arbitrarily 74 define rules to further limit motion detection results based on the geometry 75 of the bounding boxes. 76 @param buffSize: Only used if imageBuff==None. This controls the size of the 77 internal image buffer. 78 @param soft_thresh: Specify if the background subtraction method should 79 use a soft threshold, in which case the returned mask is no longer a binary 80 image, but represents weighted values. NOTE: NOT CURRENTLY IMPLEMENTED. 81 SOFT THRESHOLD WILL BE IGNORED, HARD THRESHOLD ONLY IN THIS VERSION. 82 @param smooth: applies smothing to the image before detection which can 83 reduce false detections. 84 @note: Until the image buffer is full, the result of the motion detection will be 85 nothing. See documentation on the detect(img) method of this class. 
86 ''' 87 #initialize object variables 88 self._fgMask = None 89 self._minArea = minArea 90 self._filter = rectFilter 91 self._threshold = 20 92 self._softThreshold = False #soft_thresh 93 self._smooth = smooth 94 95 if imageBuff == None: 96 self._imageBuff = pv.ImageBuffer(N=buffSize) 97 else: 98 self._imageBuff = imageBuff 99 100 self._method = method 101 self._bgSubtract = None #can't initialize until buffer is full...so done in detect() 102 self._contours = [] 103 self._annotateImg = None 104 self._rect_type = rect_type 105 self._rect_sigma = rect_sigma
106
107 - def _initBGSubtract(self):
108 if self._method==BG_SUBTRACT_FD: 109 self._bgSubtract = pv.FrameDifferencer(self._imageBuff, self._threshold, 110 soft_thresh = self._softThreshold) 111 elif self._method==BG_SUBTRACT_MCFD: 112 self._bgSubtract = pv.MotionCompensatedFrameDifferencer(self._imageBuff, self._threshold, 113 soft_thresh = self._softThreshold) 114 elif self._method==BG_SUBTRACT_MF: 115 self._bgSubtract = pv.MedianFilter(self._imageBuff, self._threshold, 116 soft_thresh = self._softThreshold) 117 elif self._method==BG_SUBTRACT_AMF: 118 self._bgSubtract = pv.ApproximateMedianFilter(self._imageBuff, self._threshold, 119 soft_thresh = self._softThreshold) 120 else: 121 raise ValueError("Unknown Background Subtraction Method specified.")
122
    def _computeContours(self):
        # Find the contours of the current foreground mask and cache the
        # resulting cv.Seq in self._contours for the get*() accessor methods.
        cvMask = self._fgMask.asOpenCVBW()
        cvdst = cv.CloneImage(cvMask) #because cv.FindContours may alter source image
        contours = cv.FindContours(cvdst, cv.CreateMemStorage(), cv.CV_RETR_CCOMP , cv.CV_CHAIN_APPROX_SIMPLE)
        self._contours = contours
129 - def _computeConvexHulls(self):
130 hulls = [] 131 seq = self._contours 132 while not (seq == None) and len(seq) != 0: 133 cvxHull = cv.ConvexHull2(seq, cv.CreateMemStorage(), return_points=True) 134 hulls.append(cvxHull) 135 seq = seq.h_next() 136 137 self._convexHulls = hulls
138
139 - def __call__(self, img, **kwargs):
140 self.detect(img,**kwargs) 141 return self.getRects()
142 143
144 - def detect(self, img, ConvexHulls=False):
145 ''' 146 You call this method to update detection results, given the new 147 image in the stream. After updating detection results, use one 148 of the get*() methods, such as getRects() to see the results in the 149 appropriate format. 150 151 @param img: A pv.Image() to be added to the buffer as the most recent image, 152 and that triggers the new motion detection. Note that, depending on the 153 background subtraction method, this may not be the "key frame" for the 154 detection. The Frame Differencer returns a background model based on the 155 middle image, but Median and Approx. Median Filters return a background 156 model based on the most recent (last) image in the buffer. 157 158 @param ConvexHulls: If true, then the detected foreground pixels are 159 grouped into convex hulls, which can have the effect of removing internal 160 "holes" in the detection. 161 162 @return: The number of detected components in the current image. To get 163 more details, use the various getX() methods, like getForegroundMask(), 164 after calling detect(). 165 166 @note: Until the image buffer is full, this method will make no detections. 167 In which case, the return value will be -1, indicating this status. Also, 168 the getKeyFrame() method should be used to retrieve the key frame from 169 the buffer, which is not always the most recent image, depending on background 170 subtraction method. 171 ''' 172 # Smooth the image 173 cvim = img.asOpenCV() 174 cvim = cv.CloneImage(cvim) 175 if self._smooth: 176 cv.Smooth(cvim, cvim) 177 178 self._imageBuff.add(pv.Image(cvim)) 179 if not self._imageBuff.isFull(): 180 return -1 181 182 #initialize background subtraction object only after buffer is full. 
183 if self._bgSubtract == None: 184 self._initBGSubtract() 185 186 #update current annotation image from buffer, as appropriate for 187 # the different methods 188 if self._method==BG_SUBTRACT_FD: 189 self._annotateImg = self._imageBuff.getMiddle() 190 if self._method==BG_SUBTRACT_MCFD: 191 self._annotateImg = self._imageBuff.getMiddle() 192 elif self._method==BG_SUBTRACT_MF: 193 self._annotateImg = self._imageBuff.getLast() 194 elif self._method==BG_SUBTRACT_AMF: 195 self._annotateImg = self._imageBuff.getLast() 196 197 mask = self._bgSubtract.getForegroundMask() 198 # if self._softThreshold: 199 # cvWeights = mask.asOpenCVBW() 200 # scale = (1.0/255.0) #because weights are 0-255 in mask image 201 # cvCurImg = self._annotateImg.copy().asOpenCVBW() 202 # cvDst = cv.CreateImage(cv.GetSize(cvWeights), cv.IPL_DEPTH_8U, 1) 203 # cv.Mul(cvWeights, cvCurImg, cvDst, scale) 204 # cv.Smooth(cvDst, cvDst) 205 # #update the foreground mask 206 # self._fgMask = pv.Image(cvDst) 207 # else: 208 209 cvBinary = mask.asOpenCVBW() 210 cv.Smooth(cvBinary, cvBinary) 211 cv.Dilate(cvBinary, cvBinary, None, 3) 212 cv.Erode(cvBinary, cvBinary, None, 1) 213 214 #update the foreground mask 215 self._fgMask = pv.Image(cvBinary) 216 217 #update the detected foreground contours 218 self._computeContours() 219 self._computeConvexHulls() 220 221 if ConvexHulls: 222 for hull in self._convexHulls: 223 cv.FillConvexPoly(cvBinary, hull, cv.RGB(255,255,255)) 224 #k = cv.CreateStructuringElementEx(15, 15, 7, 7, cv.CV_SHAPE_RECT) 225 #cv.Dilate(mask, mask, element=k, iterations=1) 226 227 return len(self._contours)
228
    def getKeyFrame(self):
        '''
        @return: The "key frame" of the motion detector's buffer. This is the image
        upon which detected motion rectangles, for example, should be overlaid. This
        is not always the last image in the buffer because some background subtraction
        methods (notably N-Frame Differencer) use the middle frame of the buffer.
        @note: Returns None until detect() has been called with a full buffer.
        '''
        return self._annotateImg #computed already by the detect() method
    def getForegroundMask(self):
        '''
        @return: a binary pv.Image representing the foreground pixels
        as determined by the selected background subtraction method.
        @note: You must call the detect() method before getForegroundMask() to
        get the updated mask.
        '''
        return self._fgMask
    def getWatershedMask(self):
        '''
        Uses the watershed algorithm to refine the foreground mask.
        Currently, this doesn't work well on real video...maybe grabcut would be better.
        @return: a grayscale pv.Image produced by scaling the 32-bit watershed
        marker image down to 8 bits.
        @note: You must call detect() before getWatershedMask() so that the
        foreground mask and contours are current.
        '''
        cvMarkerImg = cv.CreateImage( self._fgMask.size, cv.IPL_DEPTH_32S, 1)
        cv.SetZero(cvMarkerImg)

        #fill each contour with a different gray level to label connected components
        seq = self._contours
        c = 50
        while not (seq == None) and len(seq) != 0:
            if cv.ContourArea(seq) > self._minArea:
                c += 10
                moments = cv.Moments(seq)
                m00 = cv.GetSpatialMoment(moments, 0, 0)
                m01 = cv.GetSpatialMoment(moments, 0, 1)
                m10 = cv.GetSpatialMoment(moments, 1, 0)
                # NOTE(review): m00 could be zero for a degenerate contour, which
                # would raise ZeroDivisionError here — confirm inputs exclude this.
                centroid = ( int(m10/m00), int(m01/m00))
                # mark the component's centroid with a small filled circle of level c
                cv.Circle(cvMarkerImg, centroid, 3, cv.RGB(c,c,c), cv.CV_FILLED)
            seq = seq.h_next()

        # NOTE(review): c starts at 50, so (c>0) is always true and Watershed runs
        # even when no markers were drawn; presumably (c>50) was intended — confirm.
        if(c>0):
            img = self._annotateImg.asOpenCV()
            cv.Watershed(img, cvMarkerImg)

        tmp = cv.CreateImage( cv.GetSize(cvMarkerImg), cv.IPL_DEPTH_8U, 1)
        cv.CvtScale(cvMarkerImg, tmp)
        return pv.Image(tmp)
277 - def getForegroundPixels(self, bgcolor=None):
278 ''' 279 @param bgcolor: The background color to use. Specify as an (R,G,B) tuple. 280 Specify None for a blank/black background. 281 @return: The full color foreground pixels on either a blank (black) 282 background, or on a background color specified by the user. 283 @note: You must call detect() before getForegroundPixels() to 284 get updated information. 285 ''' 286 if self._fgMask == None: return None 287 288 #binary mask selecting foreground regions 289 mask = self._fgMask.asOpenCVBW() 290 291 #full color source image 292 image = self._annotateImg.copy().asOpenCV() 293 294 #dest image, full color, but initially all zeros (black/background) 295 # we will copy the foreground areas from image to here. 296 dest = cv.CloneImage(image) 297 if bgcolor==None: 298 cv.SetZero(dest) 299 else: 300 cv.Set(dest, cv.RGB(*bgcolor)) 301 302 cv.Copy(image,dest,mask) #copy only pixels from image where mask != 0 303 return pv.Image(dest)
304
305 - def getRects(self):
306 ''' 307 @return: the bounding boxes of the external contours of the foreground mask. The 308 boxes will either be the bounding rectangles of the contours, or a box fitted to 309 the contours based on the center of mass and n-sigma deviations in x and y. This 310 preference is selected when initializing the MotionDetector object. 311 312 @note: You must call detect() before getRects() to see updated results. 313 ''' 314 if self._rect_type == BOUNDING_RECTS: 315 return self.getBoundingRects() 316 317 elif self._rect_type == STANDARDIZED_RECTS: 318 return self.getStandardizedRects() 319 320 else: 321 raise ValueError("Unknown rect type: "+self._rect_type)
322 323
324 - def getBoundingRects(self):
325 ''' 326 @return: the bounding boxes of the external contours of the foreground mask. 327 @note: You must call detect() before getBoundingRects() to see updated results. 328 ''' 329 #create a list of the top-level contours found in the contours (cv.Seq) structure 330 rects = [] 331 if len(self._contours) < 1: return(rects) 332 seq = self._contours 333 while not (seq == None): 334 (x, y, w, h) = cv.BoundingRect(seq) 335 if (cv.ContourArea(seq) > self._minArea): 336 r = pv.Rect(x,y,w,h) 337 rects.append(r) 338 seq = seq.h_next() 339 340 if self._filter != None: 341 rects = self._filter(rects) 342 343 return rects
344
345 - def getStandardizedRects(self):
346 ''' 347 @return: the boxes centered on the target center of mass +- n_sigma*std 348 @note: You must call detect() before getStandardizedRects() to see updated results. 349 ''' 350 #create a list of the top-level contours found in the contours (cv.Seq) structure 351 rects = [] 352 if len(self._contours) < 1: return(rects) 353 seq = self._contours 354 while not (seq == None): 355 (x, y, w, h) = cv.BoundingRect(seq) 356 if (cv.ContourArea(seq) > self._minArea): # and self._filter(rect) 357 r = pv.Rect(x,y,w,h) 358 moments = cv.Moments(seq) 359 m_0_0 = cv.GetSpatialMoment(moments, 0, 0) 360 m_0_1 = cv.GetSpatialMoment(moments, 0, 1) 361 m_1_0 = cv.GetSpatialMoment(moments, 1, 0) 362 mu_2_0 = cv.GetCentralMoment(moments, 2, 0) 363 mu_0_2 = cv.GetCentralMoment(moments, 0, 2) 364 365 cx = m_1_0/m_0_0 366 cy = m_0_1/m_0_0 367 w = 2.0*self._rect_sigma*np.sqrt(mu_2_0/m_0_0) 368 h = 2.0*self._rect_sigma*np.sqrt(mu_0_2/m_0_0) 369 370 r = pv.CenteredRect(cx,cy,w,h) 371 372 rects.append(r) 373 seq = seq.h_next() 374 375 if self._filter != None: 376 rects = self._filter(rects) 377 378 return rects
379
380 - def getPolygons(self,return_all=False):
381 ''' 382 @param return_all: return all contours regardless of min area. 383 @return: the polygon contours of the foreground mask. The polygons are 384 compatible with pv.Image annotatePolygon() method. 385 @note: You must call detect() before getPolygons() to see updated results. 386 ''' 387 #create a list of the top-level contours found in the contours (cv.Seq) structure 388 polys = [] 389 if len(self._contours) < 1: return(polys) 390 seq = self._contours 391 while not (seq == None): 392 393 if return_all or (cv.ContourArea(seq) > self._minArea): 394 poly = [ pv.Point(*each) for each in seq ] 395 poly.append(poly[0]) 396 397 polys.append(poly) 398 399 seq = seq.h_next() 400 401 return polys
402
    def getConvexHulls(self):
        '''
        @return: the convex hulls of the contours of the foreground mask,
        as computed by the most recent detect() call.
        @note: You must call detect() before getConvexHulls() to see updated results.
        '''
        return self._convexHulls
411 - def getAnnotatedImage(self, showRects=True, showContours=False, 412 showConvexHulls=False, showFlow=False):
413 ''' 414 @return: the annotation image with selected objects drawn upon it. showFlow will 415 only work if the BG subtraction method was MCFD. 416 @note: You must call detect() prior to getAnnotatedImage() to see updated results. 417 ''' 418 rects = self.getRects() 419 outImg = self._annotateImg.copy() #deep copy, so can freely modify the copy 420 if outImg == None: return None 421 422 #draw optical flow information in white 423 if showFlow and (self._method == pv.BG_SUBTRACT_MCFD): 424 flow = self._bgSubtract.getOpticalFlow() 425 flow.annotateFrame(outImg) 426 427 if showContours or showConvexHulls: 428 cvimg = outImg.asOpenCV() 429 430 #draw contours in green 431 if showContours: 432 cv.DrawContours(cvimg, self._contours, cv.RGB(0, 255, 0), cv.RGB(255,0,0), 2) 433 434 #draw hulls in cyan 435 if showConvexHulls: 436 cv.PolyLine(cvimg, self._convexHulls, True, cv.RGB(0,255,255)) 437 438 #draw bounding box in yellow 439 if showRects: 440 for r in rects: 441 outImg.annotateRect(r,"yellow") 442 443 return outImg
444
445 - def annotateFrame(self, key_frame, rect_color='yellow', 446 contour_color='#00FF00', flow_color='white'):
447 ''' 448 Draws detection results on an image (key_frame) specified by the user. Specify 449 None as the color for any aspect you wish not drawn. 450 @return: Renders annotations onto key frame that shows detection information. 451 @note: You must call detect() prior to annotateFrame() to see updated results. 452 @note: Optical flow is only shown if method was MCFD 453 ''' 454 #key_frame = md.getKeyFrame() 455 456 if key_frame != None: 457 458 if contour_color != None: 459 for poly in self.getPolygons(): 460 key_frame.annotatePolygon(poly,color=contour_color,width=1) 461 462 if rect_color != None: 463 for rect in self.getRects(): 464 key_frame.annotatePolygon(rect.asPolygon(),width=2,color=rect_color) 465 466 if (flow_color != None) and (self._method == pv.BG_SUBTRACT_MCFD): 467 flow = self._bgSubtract.getOpticalFlow() 468 flow.annotateFrame(key_frame, type="TRACKING", color=flow_color)
469 470 #for rect in rects: 471 # key_frame.annotatePolygon(rect.asPolygon(),width=2) 472 # key_frame.annotatePoint(rect.center()) 473 474 #ilog(key_frame) 475 476
477 - def getForegroundTiles(self, bgcolor=None):
478 ''' 479 @param bgcolor: The background color to use. Specify as an (R,G,B) tuple. 480 Specify None for a blank/black background. 481 @return: a list of "tiles", where each tile is a small pv.Image 482 representing the clipped area of the annotationImg based on 483 the motion detection. Only the foreground pixels are copied, so 484 the result are tiles with full-color foreground pixels on the 485 specified background color (black by default). 486 @note: You must call detect() prior to getForegroundTiles() to get 487 updated information. 488 ''' 489 490 #binary mask selecting foreground regions 491 mask = self._fgMask.asOpenCVBW() 492 if mask == None: return None 493 494 #full color source image 495 image = self._annotateImg.copy().asOpenCV() 496 497 #dest image, full color, but initially all zeros (black/background) 498 # we will copy the foreground areas from image to here. 499 dest = cv.CloneImage(image) 500 if bgcolor==None: 501 cv.SetZero(dest) 502 else: 503 cv.Set(dest, cv.RGB(*bgcolor)) 504 505 cv.Copy(image,dest,mask) #copy only pixels from image where mask != 0 506 dst = pv.Image(dest) 507 508 rects = self.getRects() 509 510 tiles = [] 511 for r in rects: 512 #for every rectangle, crop from dest image 513 t = dst.crop(r) 514 tiles.append(t) 515 516 return tiles
517