Some groups have gotten webcam train tracking working.
This script filters a video based on HSV (hue, saturation, value) and outputs the average of the remaining coordinates.
It can be used to track a train with some colour attached as it accelerates.
We referenced (ripped off) a number of tutorials:
http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
http://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/
http://opencv-srf.blogspot.ca/2010/09/object-detection-using-color-seperation.html
To work it requires at least python dev, opencv, and numpy.
The rest of the prerequisites can be satisfied by repeatedly trying to run it.
If a file is not provided it will use a connected webcam (see webcam index).
The webcam on the track can be set to 15 fps high resolution and 30 fps low resolution (not set in this script). The colour saturation and contrast can also be set on the webcam which can help pick out colour.
The program currently outputs json and requires a keypress to get started.
#!/bin/python
# HSV calibrations for a black train with red mathnews strapped to it
# valuelower = 0
# valueupper = 255
# saturationlower = 104
# saturationupper = 187
# huelower = 0
# hueupper = 5
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    # change the capture index here if the wrong webcam is opened
    camera = cv2.VideoCapture(0)
    # give the camera sensor a moment to warm up before grabbing frames
    time.sleep(0.25)
# otherwise, we are reading from a video file
else:
    camera = cv2.VideoCapture(args["video"])

# initialize the first frame in the video stream
# NOTE(review): firstFrame is never read anywhere in this script — kept
# only in case a variant of the tracking loop still expects it.
firstFrame = None
# One window plus one trackbar per HSV channel bound.  Each slider writes
# its position straight into a module-level global of the same name, which
# the main loop reads on every frame to rebuild the filter boundaries.
cv2.namedWindow("frames")

def _make_trackbar(name, initial, maximum=255):
    """Create a trackbar on the "frames" window bound to a global.

    The callback stores the slider position in the module-level global
    named *name*; the global is also initialised to *initial* so the main
    loop can read it before the slider is first moved.
    """
    def _on_change(x):
        globals()[name] = x
    globals()[name] = initial
    cv2.createTrackbar(name, 'frames', initial, maximum, _on_change)

_make_trackbar('valuelower', 0)
_make_trackbar('valueupper', 255)
_make_trackbar('saturationlower', 0)
_make_trackbar('saturationupper', 255)
# NOTE(review): 8-bit OpenCV hue only spans 0-179, so hue slider positions
# above 179 behave the same as 179 — ranges left at 255 to keep the
# original slider behaviour.
_make_trackbar('huelower', 0)
_make_trackbar('hueupper', 255)
# loop over the frames of the video
while True:
# grab the current frame and initialize the occupied/unoccupied
# text
time1 = time.time()*1000
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame
frame = imutils.resize(frame, width=500)
boundaries = [([huelower, saturationlower, valuelower], [hueupper, saturationupper, valueupper])]
# loop over the boundaries
for (lower, upper) in boundaries:
# create NumPy arrays from the boundaries
lower = np.array(lower, dtype = "uint8")
upper = np.array(upper, dtype = "uint8")
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(hsv, lower, upper)
output = cv2.bitwise_and(frame, frame, mask = mask)
try:
M = cv2.moments(mask)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv2.rectangle(output, (cx - 5, cy - 5), (cx + 5, cy + 5), (0, 255, 0), 2)
print '{ "x" : ', cx, ', "y" : ', cy, ', "timestamp" : "', str(time.time()*1000), '" }'
except:
pass
# show the frames
cv2.imshow("frames", np.hstack([frame, output]))
time2 = time.time()*1000
# wait works for 15 FPS only!
k= cv2.waitKey(70 - int(time2 - time1)) & 0xFF
if k != -1 & 0xFF:
print '{"x": -1, "y": -1, "timestamp" : "', str(time.time()*1000), '" } '
# END COLOUR TRACKING