import scipy.stats
normal_distribution = scipy.stats.norm()
# The inverse normal CDF
inv_n_cdf = normal_distribution.ppf
inv_n_cdf([0.95, 0.99])

%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# Constants for image simulations etc
shape = [128, 128]  # No of pixels in X, Y
n_voxels = np.prod(shape)  # Total pixels
s_fwhm = 8  # Smoothing in number of pixels in x, y
seed = 1939  # Seed for random no generator
alpha = 0.05  # Default alpha level

# Image of independent random nos
np.random.seed(seed)  # Seed the generator to get same numbers each time
test_img = np.random.standard_normal(shape)
plt.figure()
plt.imshow(test_img)
plt.set_cmap('bone')
plt.xlabel('Pixel position in X')
plt.ylabel('Pixel position in Y')
plt.title('Image 1 - array of independent random numbers')

# Bonferroni threshold for this image
bonf_thresh = inv_n_cdf(1 - (alpha / n_voxels))
bonf_thresh

# Divide into FWHM chunks and fill square from mean value
sqmean_img = test_img.copy()
for i in range(0, shape[0], s_fwhm):
    i_slice = slice(i, i + s_fwhm)
    for j in range(0, shape[1], s_fwhm):
        j_slice = slice(j, j + s_fwhm)
        vals = sqmean_img[i_slice, j_slice]
        sqmean_img[i_slice, j_slice] = vals.mean()
# Multiply up to unit variance again
sqmean_img *= s_fwhm
# Show as image
plt.figure()
plt.imshow(sqmean_img)
plt.set_cmap('bone')
# axis xy
plt.xlabel('Pixel position in X')
plt.ylabel('Pixel position in Y')
plt.title('Taking means over %s by %s elements from image 1' % (s_fwhm, s_fwhm))

# smooth random number image
import scipy.ndimage as spn
sd = s_fwhm / np.sqrt(8. * np.log(2))  # sigma for this FWHM
stest_img = spn.gaussian_filter(test_img, sd, mode='wrap')

def gauss_2d_varscale(sigma):
    """ Return variance scaling for smoothing with 2D Gaussian of sigma `sigma`

    The code in this function isn't important for understanding the rest of
    the tutorial
    """
    # Make a single 2D Gaussian using given sigma
    limit = sigma * 5  # go to limits where Gaussian will be at or near 0
    x_inds = np.arange(-limit, limit + 1)
    y_inds = x_inds  # Symmetrical Gaussian (sd same in X and Y)
    [x, y] = np.meshgrid(y_inds, x_inds)
    # http://en.wikipedia.org/wiki/Gaussian_function#Two-dimensional_Gaussian_function
    gf = np.exp(-(x * x + y * y) / (2 * sigma ** 2))
    gf = gf / np.sum(gf)
    # Expectation of variance for this kernel
    AG = np.fft.fft2(gf)
    Pag = AG * np.conj(AG)  # Power of the noise
    COV = np.real(np.fft.ifft2(Pag))
    return COV[0, 0]

# Restore smoothed image to unit variance
svar = gauss_2d_varscale(sd)
scf = np.sqrt(1 / svar)
stest_img = stest_img * scf

# display smoothed image
plt.figure()
plt.imshow(stest_img)
plt.set_cmap('bone')
# axis xy
plt.xlabel('Pixel position in X')
plt.ylabel('Pixel position in Y')
plt.title('Image 1 - smoothed with Gaussian kernel of FWHM %s by %s pixels'
          % (s_fwhm, s_fwhm))

# No of resels
resels = np.prod(np.divide(shape, s_fwhm))
resels

def show_threshed(img, th):
    thimg = (img > th)
    plt.figure()
    plt.imshow(thimg)
    plt.set_cmap('bone')
    # axis xy
    plt.xlabel('Pixel position in X')
    plt.ylabel('Pixel position in Y')
    plt.title('Smoothed image thresholded at Z > %s' % th)

# threshold at 2.75 and display
show_threshed(stest_img, 2.75)

# threshold at 3.25 and display
show_threshed(stest_img, 3.25)

# expected EC at various Z thresholds, for two dimensions
plt.figure()
Z = np.linspace(0, 5, 1000)

def expected_ec_2d(z, resel_count):
    # From Worsley 1992
    z = np.asarray(z)
    return (resel_count * (4 * np.log(2)) * ((2 * np.pi) ** (-3. / 2)) * z) * np.exp((z ** 2) * (-0.5))

expEC = expected_ec_2d(Z, resels)
plt.plot(Z, expEC)
plt.xlabel('Z score threshold')
plt.ylabel('Expected EC for thresholded image')
plt.title('Expected EC for smoothed image with %s resels' % resels)

expected_ec_2d([2.75, 3.25], resels)
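# Rough check, not part of the original analysis: at high thresholds nearly
# all blobs are simply connected, so the EC of the thresholded image is close
# to the number of blobs.  As a quick sketch we can count connected components
# with `scipy.ndimage.label` (imported above as `spn`) and compare the
# observed blob counts in the smoothed image with the expected EC at the same
# two thresholds used above.
for th in [2.75, 3.25]:
    blob_labels, n_blobs = spn.label(stest_img > th)
    print('Threshold %s: %d blobs observed, expected EC %.2f'
          % (th, n_blobs, float(expected_ec_2d(th, resels))))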
# simulation to test the RFT threshold.
# find approx threshold from the vector above. We're trying to find the Z
# value for which the expected EC is 0.05
tmp = (Z > 3) & (expEC <= alpha)
alphaTH = Z[tmp][0]
print('Using Z threshold of %f' % alphaTH)

# Make lots of smoothed images and find how many have one or more blobs above threshold
repeats = 1000
falsepos = np.zeros((repeats,))
maxes = np.zeros((repeats,))
edgepix = s_fwhm  # to add edges to image - see below
big_size = [s + edgepix * 2 for s in shape]
i_slice = slice(edgepix + 1, shape[0] + edgepix + 1)
j_slice = slice(edgepix + 1, shape[1] + edgepix + 1)
for i in range(repeats):
    timg = np.random.standard_normal(size=big_size)  # gives extra edges to throw away
    stimg = spn.gaussian_filter(timg, sd, mode='wrap')
    # throw away edges to avoid artefactually high values
    # at image edges generated by the smoothing
    stimg = stimg[i_slice, j_slice]
    # Reset variance using scale factor from calculation above
    stimg *= scf
    falsepos[i] = np.any(stimg >= alphaTH)
    maxes[i] = stimg.max()
print('False positive rate in simulation was %s (%s expected)'
      % (sum(falsepos) / float(repeats), alpha))

# Display some saved SPM results output
from IPython.display import SVG
SVG(filename='spm_t_results.svg')

FWHM = [4.0528, 4.0172, 4.1192]
resel_volume = np.prod(FWHM)
resel_volume

44532 / resel_volume
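# Sketch only, not part of the SPM output above: the SPM analysis is
# three-dimensional, so the 2D expected-EC formula used in the simulation does
# not apply directly.  The corresponding 3D expression from Worsley et al.
# (1992) is E[EC] = R * (4 ln 2)**(3/2) * (2 pi)**(-2) * (z**2 - 1) * exp(-z**2 / 2),
# with R the resel count.  `expected_ec_3d` is a name introduced here for
# illustration; applying it to the resel count calculated above gives an
# approximate RFT threshold for these results.
def expected_ec_3d(z, resel_count):
    z = np.asarray(z)
    return (resel_count * (4 * np.log(2)) ** (3. / 2) * (2 * np.pi) ** (-2)
            * (z ** 2 - 1) * np.exp(-(z ** 2) / 2.))

n_resels_3d = 44532 / resel_volume
Z3 = np.linspace(0, 7, 10000)
exp_ec_3d = expected_ec_3d(Z3, n_resels_3d)
# As before, look on the falling side of the curve for the first Z at which
# the expected EC drops to alpha or below
z_thresh_3d = Z3[(Z3 > 3) & (exp_ec_3d <= alpha)][0]
print('Approximate RFT threshold for the SPM analysis: Z = %.2f' % z_thresh_3d)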