1 '''
2 Created on Nov 9, 2010
3 @author: svohara
4 '''
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38 import pyvision as pv
39 from pyvision.surveillance.BackgroundSubtraction import BG_SUBTRACT_AMF, BG_SUBTRACT_FD, BG_SUBTRACT_MCFD, BG_SUBTRACT_MF
40 import cv
41 import numpy as np
42
43
44
45
# Rect-type selector constants for MotionDetector.getRects():
# BOUNDING_RECTS returns raw contour bounding boxes; STANDARDIZED_RECTS
# returns boxes sized from the contour centroid +/- n-sigma deviations.
BOUNDING_RECTS = "BOUNDING_RECTS"
STANDARDIZED_RECTS = "STANDARDIZED_RECTS"
48
49
51 '''
52 Uses background subtraction from an image buffer to detect
53 areas of motion in a video.
54
55 The general process is to update the image buffer and then
56 call the MotionDetector's detect() method.
57 '''
58
59 - def __init__(self, imageBuff=None, thresh=20, method=BG_SUBTRACT_AMF, minArea=400,
60 rectFilter=None, buffSize=5, soft_thresh = False,rect_type=BOUNDING_RECTS,rect_sigma=2.0,
61 smooth=False):
62 '''
63 Constructor
64 @param imageBuff: a pv.ImageBuffer object to be used in the background subtraction
65 step of the motion detection. If None, then this object will create an empty
66 5-frame buffer, and until the buffer is full, the results of the motion detection
67 will be nothing.
68 @param thresh: Used by the background subtraction to eliminate noise.
69 @param method: Select background subtraction method. See constants defined in
70 BackgroundSubtraction module
71 @param minArea: minimum foreground contour area required for detection
72 @param rectFilter: a function reference that takes a list of rectangles and
73 returns a list filtered in some way. This allows the user to arbitrarily
74 define rules to further limit motion detection results based on the geometry
75 of the bounding boxes.
76 @param buffSize: Only used if imageBuff==None. This controls the size of the
77 internal image buffer.
78 @param soft_thresh: Specify if the background subtraction method should
79 use a soft threshold, in which case the returned mask is no longer a binary
80 image, but represents weighted values. NOTE: NOT CURRENTLY IMPLEMENTED.
81 SOFT THRESHOLD WILL BE IGNORED, HARD THRESHOLD ONLY IN THIS VERSION.
82 @param smooth: applies smothing to the image before detection which can
83 reduce false detections.
84 @note: Until the image buffer is full, the result of the motion detection will be
85 nothing. See documentation on the detect(img) method of this class.
86 '''
87
88 self._fgMask = None
89 self._minArea = minArea
90 self._filter = rectFilter
91 self._threshold = 20
92 self._softThreshold = False
93 self._smooth = smooth
94
95 if imageBuff == None:
96 self._imageBuff = pv.ImageBuffer(N=buffSize)
97 else:
98 self._imageBuff = imageBuff
99
100 self._method = method
101 self._bgSubtract = None
102 self._contours = []
103 self._annotateImg = None
104 self._rect_type = rect_type
105 self._rect_sigma = rect_sigma
106
        # NOTE(review): the enclosing 'def' line (presumably _initBGSubtract,
        # which detect() calls) is not visible in this chunk.
        # Instantiate the background-subtraction strategy selected at
        # construction time; all strategies share the image buffer, the
        # threshold, and the (currently always-False) soft-threshold flag.
        if self._method==BG_SUBTRACT_FD:
            self._bgSubtract = pv.FrameDifferencer(self._imageBuff, self._threshold,
                                                   soft_thresh = self._softThreshold)
        elif self._method==BG_SUBTRACT_MCFD:
            self._bgSubtract = pv.MotionCompensatedFrameDifferencer(self._imageBuff, self._threshold,
                                                                    soft_thresh = self._softThreshold)
        elif self._method==BG_SUBTRACT_MF:
            self._bgSubtract = pv.MedianFilter(self._imageBuff, self._threshold,
                                               soft_thresh = self._softThreshold)
        elif self._method==BG_SUBTRACT_AMF:
            self._bgSubtract = pv.ApproximateMedianFilter(self._imageBuff, self._threshold,
                                                          soft_thresh = self._softThreshold)
        else:
            raise ValueError("Unknown Background Subtraction Method specified.")
122
        # NOTE(review): the enclosing 'def' line (contour-computation helper
        # called by detect()) is not visible in this chunk.
        cvMask = self._fgMask.asOpenCVBW()
        # cv.FindContours modifies its input image, so work on a clone.
        cvdst = cv.CloneImage(cvMask)
        contours = cv.FindContours(cvdst, cv.CreateMemStorage(), cv.CV_RETR_CCOMP , cv.CV_CHAIN_APPROX_SIMPLE)
        self._contours = contours
128
        # NOTE(review): the enclosing 'def' line (convex-hull helper called
        # by detect()) is not visible in this chunk.
        hulls = []
        seq = self._contours
        # Walk the top-level contour sequence via h_next(), collecting the
        # convex hull (as point lists) of each contour.
        while not (seq == None) and len(seq) != 0:
            cvxHull = cv.ConvexHull2(seq, cv.CreateMemStorage(), return_points=True)
            hulls.append(cvxHull)
            seq = seq.h_next()

        self._convexHulls = hulls
138
142
143
144 - def detect(self, img, ConvexHulls=False):
145 '''
146 You call this method to update detection results, given the new
147 image in the stream. After updating detection results, use one
148 of the get*() methods, such as getRects() to see the results in the
149 appropriate format.
150
151 @param img: A pv.Image() to be added to the buffer as the most recent image,
152 and that triggers the new motion detection. Note that, depending on the
153 background subtraction method, this may not be the "key frame" for the
154 detection. The Frame Differencer returns a background model based on the
155 middle image, but Median and Approx. Median Filters return a background
156 model based on the most recent (last) image in the buffer.
157
158 @param ConvexHulls: If true, then the detected foreground pixels are
159 grouped into convex hulls, which can have the effect of removing internal
160 "holes" in the detection.
161
162 @return: The number of detected components in the current image. To get
163 more details, use the various getX() methods, like getForegroundMask(),
164 after calling detect().
165
166 @note: Until the image buffer is full, this method will make no detections.
167 In which case, the return value will be -1, indicating this status. Also,
168 the getKeyFrame() method should be used to retrieve the key frame from
169 the buffer, which is not always the most recent image, depending on background
170 subtraction method.
171 '''
172
173 cvim = img.asOpenCV()
174 cvim = cv.CloneImage(cvim)
175 if self._smooth:
176 cv.Smooth(cvim, cvim)
177
178 self._imageBuff.add(pv.Image(cvim))
179 if not self._imageBuff.isFull():
180 return -1
181
182
183 if self._bgSubtract == None:
184 self._initBGSubtract()
185
186
187
188 if self._method==BG_SUBTRACT_FD:
189 self._annotateImg = self._imageBuff.getMiddle()
190 if self._method==BG_SUBTRACT_MCFD:
191 self._annotateImg = self._imageBuff.getMiddle()
192 elif self._method==BG_SUBTRACT_MF:
193 self._annotateImg = self._imageBuff.getLast()
194 elif self._method==BG_SUBTRACT_AMF:
195 self._annotateImg = self._imageBuff.getLast()
196
197 mask = self._bgSubtract.getForegroundMask()
198
199
200
201
202
203
204
205
206
207
208
209 cvBinary = mask.asOpenCVBW()
210 cv.Smooth(cvBinary, cvBinary)
211 cv.Dilate(cvBinary, cvBinary, None, 3)
212 cv.Erode(cvBinary, cvBinary, None, 1)
213
214
215 self._fgMask = pv.Image(cvBinary)
216
217
218 self._computeContours()
219 self._computeConvexHulls()
220
221 if ConvexHulls:
222 for hull in self._convexHulls:
223 cv.FillConvexPoly(cvBinary, hull, cv.RGB(255,255,255))
224
225
226
227 return len(self._contours)
228
        # NOTE(review): the enclosing 'def' line (key-frame accessor) is not
        # visible in this chunk.
        '''
        @return: The "key frame" of the motion detector's buffer. This is the image
        upon which detected motion rectangles, for example, should be overlaid. This
        is not always the last image in the buffer because some background subtraction
        methods (notably N-Frame Differencer) use the middle frame of the buffer.
        '''
        return self._annotateImg
237
        # NOTE(review): the enclosing 'def' line (foreground-mask accessor)
        # is not visible in this chunk.
        '''
        @return: a binary pv.Image representing the foreground pixels
        as determined by the selected background subtraction method.
        @note: You must call the detect() method before getForegroundMask() to
        get the updated mask.
        '''
        return self._fgMask
246
        # NOTE(review): the enclosing 'def' line (watershed refinement
        # method) is not visible in this chunk.
        '''
        Uses the watershed algorithm to refine the foreground mask.
        Currently, this doesn't work well on real video...maybe grabcut would be better.
        '''
        # 32-bit marker image required by cv.Watershed; seeded below.
        cvMarkerImg = cv.CreateImage( self._fgMask.size, cv.IPL_DEPTH_32S, 1)
        cv.SetZero(cvMarkerImg)

        # Seed one marker (a small filled circle at the contour centroid)
        # per sufficiently large contour, each with a distinct gray level.
        seq = self._contours
        c = 50
        while not (seq == None) and len(seq) != 0:
            if cv.ContourArea(seq) > self._minArea:
                c += 10
                moments = cv.Moments(seq)
                # m00 = area; centroid = (m10/m00, m01/m00).
                m00 = cv.GetSpatialMoment(moments, 0, 0)
                m01 = cv.GetSpatialMoment(moments, 0, 1)
                m10 = cv.GetSpatialMoment(moments, 1, 0)
                centroid = ( int(m10/m00), int(m01/m00))
                cv.Circle(cvMarkerImg, centroid, 3, cv.RGB(c,c,c), cv.CV_FILLED)
            seq = seq.h_next()

        # NOTE(review): c starts at 50, so this guard is always true; it
        # looks like it was meant to skip Watershed when no markers were
        # seeded — confirm intent.
        if(c>0):
            img = self._annotateImg.asOpenCV()
            cv.Watershed(img, cvMarkerImg)

        # Scale the 32-bit marker image down to 8-bit for the returned image.
        tmp = cv.CreateImage( cv.GetSize(cvMarkerImg), cv.IPL_DEPTH_8U, 1)
        cv.CvtScale(cvMarkerImg, tmp)
        return pv.Image(tmp)
276
        # NOTE(review): the enclosing 'def' line is not visible in this chunk
        # (it takes an optional bgcolor parameter per the docstring).
        '''
        @param bgcolor: The background color to use. Specify as an (R,G,B) tuple.
        Specify None for a blank/black background.
        @return: The full color foreground pixels on either a blank (black)
        background, or on a background color specified by the user.
        @note: You must call detect() before getForegroundPixels() to
        get updated information.
        '''
        if self._fgMask == None: return None

        # Binary foreground mask selects which pixels get copied.
        mask = self._fgMask.asOpenCVBW()

        # Full-color key frame from which the foreground pixels are taken.
        image = self._annotateImg.copy().asOpenCV()

        # Destination starts as all-background (black, or the user's color);
        # the masked copy then paints only the foreground pixels over it.
        dest = cv.CloneImage(image)
        if bgcolor==None:
            cv.SetZero(dest)
        else:
            cv.Set(dest, cv.RGB(*bgcolor))

        cv.Copy(image,dest,mask)
        return pv.Image(dest)
304
        # NOTE(review): the enclosing 'def' line (rect accessor) is not
        # visible in this chunk.
        '''
        @return: the bounding boxes of the external contours of the foreground mask. The
        boxes will either be the bounding rectangles of the contours, or a box fitted to
        the contours based on the center of mass and n-sigma deviations in x and y. This
        preference is selected when initializing the MotionDetector object.

        @note: You must call detect() before getRects() to see updated results.
        '''
        # Dispatch on the rect type chosen at construction time.
        if self._rect_type == BOUNDING_RECTS:
            return self.getBoundingRects()

        elif self._rect_type == STANDARDIZED_RECTS:
            return self.getStandardizedRects()

        else:
            raise ValueError("Unknown rect type: "+self._rect_type)
323
        # NOTE(review): the enclosing 'def' line is not visible in this chunk.
        '''
        @return: the bounding boxes of the external contours of the foreground mask.
        @note: You must call detect() before getBoundingRects() to see updated results.
        '''
        rects = []
        if len(self._contours) < 1: return(rects)
        seq = self._contours
        # Walk the top-level contours; keep a bounding box only for contours
        # larger than the configured minimum area.
        while not (seq == None):
            (x, y, w, h) = cv.BoundingRect(seq)
            if (cv.ContourArea(seq) > self._minArea):
                r = pv.Rect(x,y,w,h)
                rects.append(r)
            seq = seq.h_next()

        # Optional user-supplied filter further prunes the rectangle list.
        if self._filter != None:
            rects = self._filter(rects)

        return rects
344
        # NOTE(review): the enclosing 'def' line is not visible in this chunk.
        '''
        @return: the boxes centered on the target center of mass +- n_sigma*std
        @note: You must call detect() before getStandardizedRects() to see updated results.
        '''
        rects = []
        if len(self._contours) < 1: return(rects)
        seq = self._contours
        while not (seq == None):
            (x, y, w, h) = cv.BoundingRect(seq)
            if (cv.ContourArea(seq) > self._minArea):
                # NOTE(review): this Rect is immediately overwritten by the
                # CenteredRect below; the assignment appears to be dead code.
                r = pv.Rect(x,y,w,h)
                moments = cv.Moments(seq)
                # Spatial moments: m00 = area; centroid = (m10/m00, m01/m00).
                m_0_0 = cv.GetSpatialMoment(moments, 0, 0)
                m_0_1 = cv.GetSpatialMoment(moments, 0, 1)
                m_1_0 = cv.GetSpatialMoment(moments, 1, 0)
                # Central moments mu20/mu02 give the variances in x and y.
                mu_2_0 = cv.GetCentralMoment(moments, 2, 0)
                mu_0_2 = cv.GetCentralMoment(moments, 0, 2)

                cx = m_1_0/m_0_0
                cy = m_0_1/m_0_0
                # Box spans +/- rect_sigma standard deviations about the centroid.
                w = 2.0*self._rect_sigma*np.sqrt(mu_2_0/m_0_0)
                h = 2.0*self._rect_sigma*np.sqrt(mu_0_2/m_0_0)

                r = pv.CenteredRect(cx,cy,w,h)

                rects.append(r)
            seq = seq.h_next()

        # Optional user-supplied filter further prunes the rectangle list.
        if self._filter != None:
            rects = self._filter(rects)

        return rects
379
        # NOTE(review): the enclosing 'def' line is not visible in this chunk
        # (it takes a return_all flag per the docstring).
        '''
        @param return_all: return all contours regardless of min area.
        @return: the polygon contours of the foreground mask. The polygons are
        compatible with pv.Image annotatePolygon() method.
        @note: You must call detect() before getPolygons() to see updated results.
        '''
        polys = []
        if len(self._contours) < 1: return(polys)
        seq = self._contours
        while not (seq == None):
            # Keep this contour if everything was requested, or if it
            # exceeds the minimum-area threshold.
            if return_all or (cv.ContourArea(seq) > self._minArea):
                poly = [ pv.Point(*each) for each in seq ]
                poly.append(poly[0])  # close the polygon by repeating the first vertex

                polys.append(poly)

            seq = seq.h_next()

        return polys
402
        # NOTE(review): the enclosing 'def' line (convex-hull accessor) is
        # not visible in this chunk.
        '''
        @return: the convex hulls of the contours of the foreground mask.
        @note: You must call detect() before getConvexHulls() to see updated results.
        '''
        return self._convexHulls
409
410
411 - def getAnnotatedImage(self, showRects=True, showContours=False,
412 showConvexHulls=False, showFlow=False):
413 '''
414 @return: the annotation image with selected objects drawn upon it. showFlow will
415 only work if the BG subtraction method was MCFD.
416 @note: You must call detect() prior to getAnnotatedImage() to see updated results.
417 '''
418 rects = self.getRects()
419 outImg = self._annotateImg.copy()
420 if outImg == None: return None
421
422
423 if showFlow and (self._method == pv.BG_SUBTRACT_MCFD):
424 flow = self._bgSubtract.getOpticalFlow()
425 flow.annotateFrame(outImg)
426
427 if showContours or showConvexHulls:
428 cvimg = outImg.asOpenCV()
429
430
431 if showContours:
432 cv.DrawContours(cvimg, self._contours, cv.RGB(0, 255, 0), cv.RGB(255,0,0), 2)
433
434
435 if showConvexHulls:
436 cv.PolyLine(cvimg, self._convexHulls, True, cv.RGB(0,255,255))
437
438
439 if showRects:
440 for r in rects:
441 outImg.annotateRect(r,"yellow")
442
443 return outImg
444
445 - def annotateFrame(self, key_frame, rect_color='yellow',
446 contour_color='#00FF00', flow_color='white'):
447 '''
448 Draws detection results on an image (key_frame) specified by the user. Specify
449 None as the color for any aspect you wish not drawn.
450 @return: Renders annotations onto key frame that shows detection information.
451 @note: You must call detect() prior to annotateFrame() to see updated results.
452 @note: Optical flow is only shown if method was MCFD
453 '''
454
455
456 if key_frame != None:
457
458 if contour_color != None:
459 for poly in self.getPolygons():
460 key_frame.annotatePolygon(poly,color=contour_color,width=1)
461
462 if rect_color != None:
463 for rect in self.getRects():
464 key_frame.annotatePolygon(rect.asPolygon(),width=2,color=rect_color)
465
466 if (flow_color != None) and (self._method == pv.BG_SUBTRACT_MCFD):
467 flow = self._bgSubtract.getOpticalFlow()
468 flow.annotateFrame(key_frame, type="TRACKING", color=flow_color)
469
470
471
472
473
474
475
476
        # NOTE(review): the enclosing 'def' line is not visible in this chunk
        # (it takes an optional bgcolor parameter per the docstring).
        '''
        @param bgcolor: The background color to use. Specify as an (R,G,B) tuple.
        Specify None for a blank/black background.
        @return: a list of "tiles", where each tile is a small pv.Image
        representing the clipped area of the annotationImg based on
        the motion detection. Only the foreground pixels are copied, so
        the result are tiles with full-color foreground pixels on the
        specified background color (black by default).
        @note: You must call detect() prior to getForegroundTiles() to get
        updated information.
        '''
        # NOTE(review): if self._fgMask is None (detect() has not run), the
        # asOpenCVBW() call below fails before the None check on 'mask' can
        # take effect — the guard appears to be ordered incorrectly.
        mask = self._fgMask.asOpenCVBW()
        if mask == None: return None

        # Full-color key frame from which the foreground pixels are taken.
        image = self._annotateImg.copy().asOpenCV()

        # Paint the foreground pixels over an all-background destination
        # (black, or the user-specified color).
        dest = cv.CloneImage(image)
        if bgcolor==None:
            cv.SetZero(dest)
        else:
            cv.Set(dest, cv.RGB(*bgcolor))

        cv.Copy(image,dest,mask)
        dst = pv.Image(dest)

        # Crop one tile per detection rectangle.
        rects = self.getRects()

        tiles = []
        for r in rects:
            t = dst.crop(r)
            tiles.append(t)

        return tiles
517