%pylab inline
Populating the interactive namespace from numpy and matplotlib
import cv2
# Some code from openCV docs
OpenCV: "OpenCV (Open Source Computer Vision) is a popular computer vision library started by Intel in 1999"
# Open the default webcam (device index 0) and probe it interactively.
cap = cv2.VideoCapture(0)
cap.open(0)
True
cap.isOpened()
True
# Grab a single frame (ret is the success flag) and free the camera.
ret, frame = cap.read()
cap.release()
# The captured frame is a plain numpy array of bytes.
type(frame)
numpy.ndarray
frame.dtype
dtype('uint8')
frame.shape
(480, 640, 3)
# Displaying directly looks wrong: OpenCV stores pixels in BGR order,
# while matplotlib's imshow expects RGB.
imshow(frame)
<matplotlib.image.AxesImage at 0x7fbb91c97490>
# Reversing the last (channel) axis swaps BGR -> RGB.
imshow(frame[:,:,::-1])
<matplotlib.image.AxesImage at 0x7fbb902d8390>
Color layers are in wrong order!
# cv2.cvtColor is the explicit/idiomatic way to do the same conversion.
image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
imshow(image_rgb)
<matplotlib.image.AxesImage at 0x7fbb8a19e410>
# Live preview: show raw webcam frames until 'q' is pressed.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()        # grab the next frame
    cv2.imshow('frame',frame)      # display as-is (BGR)
    key = cv2.waitKey(1) & 0xFF    # poll the keyboard for ~1 ms
    if key == ord('q'):
        break
# Free the camera once the loop ends.
cap.release()
#cv2.destroyAllWindows()
Because we have the image as a numpy array, we can apply any type of image processing to it.
import numpy as np
import cv2
# Live grayscale preview of the webcam stream.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    # OpenCV delivers BGR; collapse to a single luminance channel.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame',gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera and close the display window.
cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-13-2f316a103b3b> in <module>() 6 while(True): 7 # Capture frame-by-frame ----> 8 ret, frame = cap.read() 9 10 # Our operations on the frame come here KeyboardInterrupt:
# Release the camera that was left open when the loop above was interrupted.
cap.release()
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(frame, 40, 120, apertureSize = 3)
cv2.imshow('frame', edges)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
def detect(img):
    """Run the OpenCV frontal-face Haar cascade on img.

    Returns rectangles as (x1, y1, x2, y2) corner coordinates,
    or an empty list when no face is found.
    """
    cascade = cv2.CascadeClassifier(
        "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml")
    found = cascade.detectMultiScale(img)
    if len(found) == 0:
        return []
    # detectMultiScale yields (x, y, w, h); convert to corner form in place.
    found[:, 2:] += found[:, :2]
    return found
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
rects = detect(frame)
for x1, y1, x2, y2 in rects:
cv2.rectangle(frame, (x1, y1), (x2, y2), (127, 255, 0), 2)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
cap = cv2.VideoCapture(0)
ret, prevframe = cap.read()
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
difference = frame - prevframe
cv2.imshow('frame',difference)
prevframe = frame
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
cap = cv2.VideoCapture(0)
ret, prevframe = cap.read()
prevframe = cv2.cvtColor(prevframe, cv2.COLOR_BGR2GRAY)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
difference = frame - prevframe
cv2.imshow('frame',difference)
prevframe = frame
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
cap = cv2.VideoCapture(0)
sigma = 10
ret, prevframe = cap.read()
prevframe = cv2.cvtColor(prevframe, cv2.COLOR_BGR2GRAY)
prevframe = cv2.GaussianBlur( difference, (0,0), 30 )
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.GaussianBlur( frame, (0,0), 30 )
difference = frame - prevframe
difference = cv2.GaussianBlur(difference, (0,0), 30 )
cv2.imshow('frame',difference)
prevframe = frame
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
cap = cv2.VideoCapture(0)
sigma = 40
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret,frame = cv2.threshold(frame,100,255,cv2.THRESH_BINARY)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
from scipy.ndimage import gaussian_filter
# Temporal smoothing: display the average of the current frame and the two
# previous ones (a 3-tap FIR filter along the time axis).
cap = cv2.VideoCapture(0)
sigma = 40
ret, prevframe = cap.read()
#prevframe = cv2.cvtColor(prevframe, cv2.COLOR_BGR2GRAY)
# NOTE(review): only the two seed frames are spatially blurred; frames read
# inside the loop are not -- confirm whether that asymmetry is intentional.
prevframe = gaussian_filter(prevframe, sigma)
# NOTE(review): this filters the already-filtered prevframe a second time
# rather than a second capture -- looks like a leftover; verify.
prevframe2 = gaussian_filter(prevframe, sigma)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#sumframe = frame
# uint8 integer division: each term contributes floor(value/3).
sumframe = frame/3 + prevframe/3 + prevframe2/3
prevframe2 = prevframe
prevframe = frame
cv2.imshow('frame',sumframe)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
print "done!"
done!
We can make both FIR filters and IIR time filters!
from scipy.ndimage import gaussian_filter
cap = cv2.VideoCapture(0)
sigma = 40
ret, prevframe = cap.read()
prevframe = gaussian_filter(prevframe, sigma)
while(True):
ret, frame = cap.read()
sumframe = frame/4 + prevframe/2
prevframe = sumframe
cv2.imshow('Window',sumframe)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
from scipy.ndimage import gaussian_filter
cap = cv2.VideoCapture(0)
bgs = cv2.BackgroundSubtractorMOG(24*60, 1, 0.7, 0.01)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = bgs.apply(frame)
cv2.imshow('Window',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
cap = cv2.VideoCapture(0)
bgs = cv2.BackgroundSubtractorMOG(24*60, 5, 0.1)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
bgmask = bgs.apply(frame)
cv2.imshow('frame', frame*(bgmask/255))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
#Motion Tracking uses code from http://derek.simkowiak.net/motion-tracking-with-python/
from scipy.ndimage import gaussian_filter
cap = cv2.VideoCapture(0)
# Seed a float32 running-average image with the first frame.
ret, color_image = cap.read()
accum = np.float32(color_image)
while(True):
# Capture frame-by-frame
ret, color_image = cap.read()
# Spatial blur (sigma=19) suppresses pixel noise before differencing.
color_image = cv2.GaussianBlur( color_image, (0,0), 19 )
# Exponential running average: accum = 0.32*frame + 0.68*accum.
cv2.accumulateWeighted( color_image, accum, 0.320)
# absdiff avoids the uint8 wrap-around of plain subtraction; the bare
# `uint8` name is numpy.uint8, pulled in by the %pylab magic at the top.
difference = cv2.absdiff( color_image, accum.astype(uint8))
cv2.imshow('frame', difference)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
# blob tracking
# Detect moving blobs: difference each frame against a running average,
# then run SimpleBlobDetector on the difference image.
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# float32 running-average image seeded with the first (gray) frame.
accum = np.float32(frame)
# Configure the blob detector: filter only by area (20..500 px),
# merge blobs closer than 50 px.
params = cv2.SimpleBlobDetector_Params()
params.minDistBetweenBlobs = 50.0
params.filterByInertia = False
params.filterByConvexity = False
params.filterByColor = False
params.filterByCircularity = False
params.filterByArea = True
params.minArea = 20.0
params.maxArea = 500.0
params.minThreshold = 40
blobdetect = cv2.SimpleBlobDetector(params)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# NOTE(review): this blurred image is computed but never used below --
# the accumulation and diff work on the raw `frame`; verify intent.
color_image = cv2.GaussianBlur( frame, (0,0), 19 )
# Running average (alpha=0.32), then wrap-safe difference against it.
cv2.accumulateWeighted( frame, accum, 0.320)
difference = cv2.absdiff( frame, accum.astype(uint8))
keypoints = blobdetect.detect(difference)
# Paint each detected blob as a filled circle at its keypoint.
for kp in keypoints:
cv2.circle(difference, (int(kp.pt[0]),int(kp.pt[1])), int(kp.size), (255, 0,0), -1)
cv2.imshow('frame', difference)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
done!
# Detect straight lines in one webcam frame with the standard Hough
# transform and draw them over the image.
from scipy.ndimage import gaussian_filter
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cap.release()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(frame,50,150,apertureSize = 3)
# rho resolution 1 px, theta resolution 1 degree, accumulator threshold 200.
lines = cv2.HoughLines(edges,1,np.pi/180, 200)
# BUG FIX: HoughLines returns None when no line clears the accumulator
# threshold; iterating over None raises a TypeError. Guard first.
if lines is not None:
    for rho,theta in lines[0]:
        # Convert (rho, theta) normal form into two far-apart points
        # on the line, then draw a 2 px black segment between them.
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv2.line(frame,(x1,y1),(x2,y2),(0,0,0),2)
imshow(frame, cmap=cm.gray)
<matplotlib.image.AxesImage at 0x7fe4246fc110>
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(frame,50,150,apertureSize = 3)
lines = cv2.HoughLines(edges,1,np.pi/180,200)
for rho,theta in lines[0]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(edges,(x1,y1),(x2,y2),(0,0,255),2)
cv2.imshow('frame', edges)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
#cv2.destroyAllWindows()
print "done!"
--------------------------------------------------------------------------- error Traceback (most recent call last) <ipython-input-13-5bc76e1820aa> in <module>() 4 # Capture frame-by-frame 5 ret, frame = cap.read() ----> 6 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 7 8 edges = cv2.Canny(frame,50,150,apertureSize = 3) error: /build/buildd/opencv-2.4.9+dfsg/modules/imgproc/src/color.cpp:3737: error: (-215) scn == 3 || scn == 4 in function cvtColor
Oops! cv2.HoughLines returns None when no lines are found, so we need to check for that before iterating over the result.
By: Andrés Cabrera mantaraya36@gmail.com
For Course MAT 201A at UCSB
This ipython notebook is licensed under the CC-BY-NC-SA license: http://creativecommons.org/licenses/by-nc-sa/4.0/