# Wide default figure size for the inline matplotlib plots below.
# NOTE(review): `rcParams` (and the bare imshow/scatter/plot calls later)
# presumably come from pylab / %pylab — confirm the notebook ran that magic.
rcParams['figure.figsize'] = (16, 4)
import cv2
# --- Capture the first reference frame (img1) from the default webcam ---
# The export lost the loop indentation; reconstructed here.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)
    # Press 'q' to stop capturing and keep the last grabbed frame.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
# Show the kept frame inline (OpenCV is BGR; matplotlib wants RGB).
imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
img1 = frame
# --- Capture a second frame (img2) the same way ---
# The export lost the loop indentation; reconstructed here.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)
    # Press 'q' to stop capturing and keep the last grabbed frame.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
# Show the kept frame inline (BGR -> RGB for matplotlib).
imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
img2 = frame
# --- Detect ORB keypoints and compute descriptors on img1 ---
#orb = cv2.ORB()
# Legacy OpenCV 2.4 factory interface (replaced by cv2.ORB_create() in 3.x+).
detector = cv2.FeatureDetector_create("ORB")
descriptorExtractor = cv2.DescriptorExtractor_create("ORB")
keypoints = detector.detect(img1)
# compute() may drop keypoints it cannot describe, so rebind both.
(keypoints, descriptors) = descriptorExtractor.compute(img1, keypoints)
# Interactive inspection (REPL echoes — no effect when run as a script).
type(keypoints), len(keypoints)
keypoints[0]
help(keypoints[0])
keypoints[0].pt
# (x, y) image coordinates of every keypoint.
points = [kp.pt for kp in keypoints]
points[:10]
zip(*points)
# Overlay the keypoints on the image ([:,:,::-1] flips BGR -> RGB).
scatter(*zip(*points))
imshow(img1[:,:,::-1])
scatter(*zip(*points))
descriptors
descriptors.shape
# --- Repeat ORB detection/description on img2 ---
keypoints2 = detector.detect(img2)
# BUG FIX: descriptors for keypoints2 must be computed from img2's pixels;
# the original passed img1 here, producing descriptors of the wrong image.
(keypoints2, descriptors2) = descriptorExtractor.compute(img2, keypoints2)
descriptors2.shape
points2 = [kp.pt for kp in keypoints2]
# Side-by-side comparison of the keypoints on both frames.
subplot(121)
imshow(img1[:,:,::-1])
scatter(*zip(*points))
subplot(122)
imshow(img2[:,:,::-1])
scatter(*zip(*points2))
gcf().set_figheight(8)
# create BFMatcher object
# Hamming distance is the correct metric for binary ORB descriptors.
bf = cv2.DescriptorMatcher_create("BruteForce-Hamming")
#bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(descriptors, descriptors2)
# Sort them in the order of their distance (best matches first).
matches = sorted(matches, key=lambda m: m.distance)
# Interactive inspection of the DMatch objects (REPL echoes).
type(matches), type(matches[0])
help(matches[0])
matches[0].distance, matches[0].imgIdx, matches[0].queryIdx, matches[0].trainIdx
# queryIdx indexes the first (img1) keypoint list...
matches_src = [keypoints[match.queryIdx].pt for match in matches]
imshow(img1[:,:,::-1])
scatter(*zip(*matches_src))
len(keypoints2)
# ...trainIdx indexes the second (img2) keypoint list.
matches_dest = [keypoints2[match.trainIdx].pt for match in matches]
len(matches_dest)
imshow(img2[:,:,::-1])
scatter(*zip(*matches_dest))
# Stack the two frames vertically so matches can be drawn across both.
twoimg = r_[img1, img2]
imshow(twoimg[:,:,::-1])
gcf().set_figheight(8)
# plot() call-syntax experiments (REPL).
plot(((5, 0), (10, 15)))
plot((5, 0), (10, 15))
matches_src[:5], matches_dest[:5]
zip(matches_src[:5])
zip(*matches_src[:5])
for i in range(5):
print zip(matches_src[i],matches_dest[i])
for i in range(5):
plot(*zip(matches_src[i],matches_dest[i]))
imshow(twoimg[:,:,::-1])
gcf().set_figheight(8)
scatter(*zip(*matches_src[:5]))
scatter(*zip(*matches_dest[:5]))
matches_dest[:5]
array(matches_dest[:5])
array(matches_dest[:5])[:,1]
transpoints = array(matches_dest[:5])
transpoints[:,1] +=100
transpoints
Nice! Automatic passing by reference.
imshow(twoimg[:,:,::-1])
gcf().set_figheight(8)
for i in range(50):
shifted_point = array(matches_dest[i])
shifted_point[1] += 480
plot(*zip(matches_src[i],shifted_point))
xlim((0, 640))
ylim((960, 0))
# --- Mean-shift tracking demo (OpenCV tutorial pattern) ---
#cap = cv2.VideoCapture('slow.flv')
cap = cv2.VideoCapture(0)
# take first frame of the video
ret, frame = cap.read()
# setup initial location of window
r, h, c, w = 170, 150, 250, 150  # simply hardcoded the values
track_window = (c, r, w, h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
# BUG FIX: the hue histogram must come from the cropped ROI, not the whole
# frame (the original converted `frame`, leaving `roi` unused).
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# Mask out dark / low-saturation pixels so the hue histogram is reliable.
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# Setup the termination criteria: either 10 iterations or move by at least 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # apply meanshift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        # Draw it on image
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('frame', frame)
        k = cv2.waitKey(60) & 0xff
        if k == 27:  # Esc quits
            break
        #else:
        #    cv2.imwrite(chr(k)+".jpg",img2)
    else:
        break
#cv2.destroyAllWindows()
cap.release()
# --- CamShift tracking demo (adaptive window size/orientation) ---
cap = cv2.VideoCapture(0)
# take first frame of the video
ret, frame = cap.read()
# setup initial location of window
r, h, c, w = 320, 90, 400, 125  # simply hardcoded the values
track_window = (c, r, w, h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
# BUG FIX: the hue histogram must come from the cropped ROI, not the whole
# frame (the original converted `frame`, leaving `roi` unused).
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# Mask out dark / low-saturation pixels so the hue histogram is reliable.
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# Setup the termination criteria: either 10 iterations or move by at least 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # apply CamShift to get the new location; `ret` is the rotated rect.
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        # Draw the axis-aligned track window (rotated-rect drawing left
        # commented out below).
        x, y, w, h = track_window
        cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        #pts = cv2.boxPoints(ret)
        #pts = np.int0(pts)
        #pts = array(ret[0])
        #cv2.polylines(frame,[pts],True, 255,2)
        cv2.imshow('frame', frame)
        k = cv2.waitKey(60) & 0xff
        if k == 27:  # Esc quits
            break
        #else:
        #    cv2.imwrite(chr(k)+".jpg",img2)
    else:
        break
#cv2.destroyAllWindows()
cap.release()
By: Andrés Cabrera mantaraya36@gmail.com
For course MAT 201A at UCSB.
This IPython notebook is licensed under the CC BY-NC-SA license: http://creativecommons.org/licenses/by-nc-sa/4.0/