Load libraries:
import numpy as np
from matplotlib import pyplot as pp
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA

import brainstem as b
import texture as tx

%matplotlib inline
import mpld3
mpld3.enable_notebook()
Stage 1:
# Stage 1: load the first available image, convert it to greyscale, and
# report its dimensions.
filenames = b.get_filenames()        # all available image files
img = b.get_img(filenames[0], 4)     # NOTE(review): 4 is presumably a scale/level -- confirm in brainstem docs
img = b.make_grey(img)
print(img.shape)                     # e.g. (1125, 1500)
Output: (1125, 1500)
# Stage 1 (cont.): reload the image, crop away the noisy borders, display
# it, and print the Gabor frequencies suggested for it.
filenames = b.get_filenames()
img = b.get_img(filenames[0], 4)
img = b.make_grey(img)
img = img[300:-200, 300:-300]        # crop borders (empty slide margin)
# `pylab`/`cm` were never imported -- use the pyplot alias from the imports.
pp.imshow(img, cmap=pp.cm.Greys_r)
print(tx.get_freqs(img))
Output: [0.0015713484026367724, 0.0031426968052735448, 0.0062853936105470897, 0.012570787221094179, 0.025141574442188359, 0.050283148884376717, 0.10056629776875343]
Stage 2:
Size the filter bank to the number of frequencies suggested for the image and the number of angles requested.
# Stage 2: build the filter-bank axes -- one kernel per (frequency, angle)
# pair. `angle` is the angular step in degrees over the half-circle.
angle = 10
freqs = tx.get_freqs(img)
thetas = np.deg2rad(np.arange(0, 180, angle))
# kernels, kernel_freqs = tx.make_filter_bank(freqs, thetas)
num_rows = len(freqs)                # was bare `size()`, which is never imported
num_cols = len(thetas)
print('num_rows = ', num_rows, 'num_cols = ', num_cols)
print(num_rows * num_cols)           # total kernels (7 * 18 = 126), not hard-coded
Output: num_rows = 7, num_cols = 18, so 7 * 18 = 126 filters.
Stage 3:
# Stage 3: convolve the image with the real part of every Gabor kernel and
# show each response in a num_rows x num_cols subplot grid, keeping the
# responses for Stage 4.
fig = pp.figure()
num_rows = len(freqs)
num_cols = len(thetas)
len_x, len_y = img.shape
# One slot per (frequency, theta) pair. The original
# `[[[[]]*len_y]*len_x]*...` construction aliased a single inner list via
# the `*` operator -- harmless only because every slot is overwritten below.
my_output = [None] * (num_rows * num_cols)
idx = 1
for frequency in freqs:
    for theta in thetas:
        kernel = np.real(tx.gabor_kernel(frequency, theta, bandwidth=1))
        conv_img = tx.fftconvolve(img, kernel, 'same')  # same-size response
        subp = fig.add_subplot(num_rows, num_cols, idx)
        pp.axis('off')
        pp.imshow(conv_img)
        my_output[idx - 1] = conv_img
        idx += 1
Stage 4:
# Stage 4: per-pixel mean and variance of the responses across the whole
# filter bank. Stacking gives shape (n_filters, len_x, len_y), so reducing
# over axis 0 replaces the original per-pixel triple Python loop with two
# vectorized numpy calls (identical values, orders of magnitude faster).
len_x, len_y = img.shape
responses = np.asarray(my_output)
my_vars = responses.var(axis=0)
my_means = responses.mean(axis=0)
Stage 5:
# Stage 5: normalize the image by the bank statistics, then count per pixel
# how many filter responses are "hot" (more than 2 std devs from that
# response's own mean).
new_img = (img - my_means) / my_vars
pp.imshow(new_img)
pp.imsave('week1_output.png', new_img)

num_layers = num_rows * num_cols
# 50 border rows trimmed from each side below, hence len_x - 100 rows here.
power_center = np.zeros((len_x - 100, len_y))
# Original loop was range(1, num_layers), silently skipping layer 0.
for i in range(num_layers):
    curr_img = my_output[i][50:-50]  # trim rows noisy from kernel convolution
    tmp_mean = curr_img.mean()
    tmp_std = curr_img.std()
    # Boolean mask; the original wrapped this in np.round, a no-op on bools.
    hot = np.abs(curr_img - tmp_mean) > (2 * tmp_std)
    power_center = power_center + hot
print(num_layers)
Output: 126
# Distribution check (uncomment to inspect the hot-count histogram):
# pp.hist(power_center.reshape(power_center.size), bins=100)
# Pixels hot in 11-24 of the 126 filters.
output15 = (power_center > 10) & (power_center < 25)
pp.imshow(output15, cmap=pp.cm.Greys_r)  # pylab/cm were never imported
Output: <matplotlib.image.AxesImage at 0x107face50>
# Pixels hot in 6-10 filters (moderate activity band).
output25 = (power_center > 5) & (power_center <= 10)
pp.imshow(output25, cmap=pp.cm.Greys_r)  # pylab/cm were never imported
Output: <matplotlib.image.AxesImage at 0x112602b90>
# Pixels hot in 2-5 filters (weak activity band).
output5 = (power_center > 1) & (power_center <= 5)
pp.imshow(output5, cmap=pp.cm.Greys_r)  # pylab/cm were never imported
Output: <matplotlib.image.AxesImage at 0x1133383d0>