Skip to content
This repository was archived by the owner on Jan 3, 2023. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion examples/motion-heatmap/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,13 @@ This sample application is useful to see movement patterns over time. For exampl
* The vtest.avi video from https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi

## Setup
1. You need the extra modules installed for the MOG background subtractor. This tutorial was tested on Windows\*, and the easiest way to install it was using:
1. You need the extra modules installed for the MOG background subtractor. This tutorial was tested on Mac and Windows\*, and the easiest way to install it was using:
```
pip install opencv-contrib-python
```
2. Download the vtest.avi video from https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi and put it in the same folder as the Python script.
3. Run the Python script. You should see a diff-overlay.jpg when it's done.
4. You can use this script to run on other videos with `python motion-heatmap.py -i /path/to/video.ext`

![](images/diff-overlay.jpg)

Expand Down
82 changes: 66 additions & 16 deletions examples/motion-heatmap/motion-heatmap.py
Original file line number Diff line number Diff line change
@@ -1,28 +1,70 @@
'''
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 Intel Corporation.
Licensed under the MIT license. See LICENSE file in the project root for full license information.
'''
"""

import numpy as np
import argparse
import cv2
import copy
import os
import numpy as np


def main():
cap = cv2.VideoCapture('vtest.avi')
parser = argparse.ArgumentParser(
description='Generate the heatmaps from a video', )

parser.add_argument('-i',
'--input',
help='Path to video',
default='vtest.avi',
type=str,
dest='INPUT')
parser.add_argument(
'-f',
'--frames',
help=
'Number of frames to go through. Default is 350, 0 is to go through full video (WARNING: this might take some time).',
type=int,
default=350,
dest='NUM_FRAMES')
parser.add_argument(
'-p',
'--progress-bar',
help=
'Display progress bar (requires tqdm, which can be installed with `pip install tqdm`.',
type=bool,
default=False,
dest='PROGRESS_BAR')

args = parser.parse_args()

cap = cv2.VideoCapture(args.INPUT)
# pip install opencv-contrib-python
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

# number of frames is a variable for development purposes, you can change the for loop to a while(cap.isOpened()) instead to go through the whole video
num_frames = 350
# number of frames is a variable for development purposes,
# you can change the for loop to a while(cap.isOpened()) instead to go through the whole video
num_frames = args.NUM_FRAMES
if num_frames == 0:
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

first_iteration_indicator = 1
for i in range(0, num_frames):
'''
current_frame_number = 0

if args.PROGRESS_BAR:
from tqdm import trange
iterate_through = trange(num_frames)
else:
iterate_through = range(0, num_frames)
for i in iterate_through:
"""
There are some important reasons this if statement exists:
-in the first run there is no previous frame, so this accounts for that
-the first frame is saved to be used for the overlay after the accumulation has occurred
-the height and width of the video are used to create an empty image for accumulation (accum_image)
'''
"""
if (first_iteration_indicator == 1):
ret, frame = cap.read()
first_frame = copy.deepcopy(frame)
Expand All @@ -32,18 +74,22 @@ def main():
first_iteration_indicator = 0
else:
ret, frame = cap.read() # read a frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert to grayscale
gray = cv2.cvtColor(frame,
cv2.COLOR_BGR2GRAY) # convert to grayscale

fgmask = fgbg.apply(gray) # remove the background

# for testing purposes, show the result of the background subtraction
# cv2.imshow('diff-bkgnd-frame', fgmask)

# apply a binary threshold only keeping pixels above thresh and setting the result to maxValue. If you want
# motion to be picked up more, increase the value of maxValue. To pick up the least amount of motion over time, set maxValue = 1
# apply a binary threshold only keeping pixels above thresh and
# setting the result to maxValue. If you want
# motion to be picked up more, increase the value of maxValue.
# To pick up the least amount of motion over time, set maxValue = 1
thresh = 2
maxValue = 2
ret, th1 = cv2.threshold(fgmask, thresh, maxValue, cv2.THRESH_BINARY)
ret, th1 = cv2.threshold(fgmask, thresh, maxValue,
cv2.THRESH_BINARY)
# for testing purposes, show the threshold image
# cv2.imwrite('diff-th1.jpg', th1)

Expand All @@ -60,6 +106,7 @@ def main():

if cv2.waitKey(1) & 0xFF == ord('q'):
break
current_frame_number += 1

# apply a color map
# COLORMAP_PINK also works well, COLORMAP_BONE is acceptable if the background is dark
Expand All @@ -70,12 +117,15 @@ def main():
# overlay the color mapped image to the first frame
result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)

# Output filename generation
head, tail = os.path.split(args.INPUT)
# save the final overlay image
cv2.imwrite('diff-overlay.jpg', result_overlay)
cv2.imwrite('{}_overlay.jpg'.format(tail.split('.')[0]), result_overlay)

# cleanup
cap.release()
cv2.destroyAllWindows()

if __name__=='__main__':
main()

if __name__ == '__main__':
main()