import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.pyplot as plt
import nipy.modalities.fmri.design_matrix as dm
from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm
from rpy2.robjects import r
from rpy2.robjects import FloatVector, IntVector, ListVector
# Load the neuRosim R package through rpy2
r['library']('neuRosim')
def simulate_data(onsets,                               # Onsets per region, per condition
                  nscans=100,                           # Number of scans
                  TR=2.0,                               # Repetition time
                  region_centers=[[20, 20], [60, 60]],  # Centers of regions
                  region_radiuses=[15, 10],             # Radii of regions
                  effect_sizes=[[2.0], [.75]],          # Effect sizes per region, per
                                                        # condition: [[r1c1, r1c2], [r2c1, r2c2]]
                  durations=[[2.0], [2.0]],             # Durations per region, per condition
                  SNR=15,                               # Signal-to-noise ratio
                  FWHM=5,                               # Amount of smoothing
                  n_regions=2,                          # Number of regions
                  fading=0.1,                           # Amount of fading from regions to outside
                  size=[80, 80],                        # Total size of images
                  ):
# Get total duration of scan
total_time = nscans * TR
# Convert input to R objects
onsets = [[FloatVector(ol_cond) for ol_cond in ol_region] for ol_region in onsets]
region_centers = [FloatVector(rc) for rc in region_centers]
size = IntVector(size)
# Set up design (neuRosim)
design = r['simprepTemporal'](regions=n_regions,
onsets=onsets,
durations=durations,
TR=TR,
totaltime=total_time,
effectsize=effect_sizes)
# Set up the spatial map (neuRosim)
spatial = r['simprepSpatial'](regions=n_regions,
coord=region_centers,
radius=region_radiuses,
form="sphere",
fading=fading)
    # Get the simulated data (neuRosim)
sim_data = r['simVOLfmri'](design=design,
image=spatial,
SNR=SNR,
noise="mixture",
dim=size,
w=FloatVector([0.05,0.1,0.01,0.09,0.05,0.7]),
FWHM=FWHM,
spat='gammaRF')
return np.array(sim_data)
Loading required package: deSolve
This is neuRosim 0.2-10
neuRosim is BETA software! Please report any bugs.
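As a quick illustration (a minimal sketch, not part of the original analysis; the onset times here are made up), the function can be called like this for a single dataset:
# Hypothetical example call: two regions, one condition each
example_onsets = [[[10.0, 40.0, 70.0]], [[10.0, 40.0, 70.0]]]  # per region, per condition
example_data = simulate_data(example_onsets, nscans=100, TR=2.0)
print example_data.shape  # expected: (80, 80, 100) -- width, height, scans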
First we need a function that creates random parameters, so that we can simulate multiple subjects with different brain anatomies.
def simulate_subject(spatial_jitter,
                     radius_jitter,
                     effect_size_jitter,
                     effect_sizes=[2.5, 0.75],
                     radiuses=[15, 10],
                     snr_jitter=1,
                     region_centers=[[25, 25], [55, 55]],
                     standard_snr=2.0):
    # Jitter region centers
    region_centers = sp.stats.norm(loc=region_centers, scale=spatial_jitter).rvs((2, 2))
    # Jitter radiuses
    radiuses = list(sp.stats.norm(loc=radiuses, scale=radius_jitter).rvs(2))
    # Jitter effect sizes; truncnorm's bounds are in units of scale relative to loc,
    # so truncate at zero to keep effect sizes positive
    a = -np.array(effect_sizes) / effect_size_jitter
    effect_sizes = sp.stats.truncnorm(a, np.inf, loc=effect_sizes, scale=effect_size_jitter).rvs(2)
    effect_sizes = [[e] for e in list(effect_sizes)]
    # Jitter SNR (scalar draw)
    snr = sp.stats.norm(loc=standard_snr, scale=snr_jitter).rvs()
    return region_centers, radiuses, effect_sizes, snr
Let's simulate 20 subjects. We jitter the region centers by drawing them from a normal distribution (SD = 5 voxels), and do the same for the region radii (SD = 5) and the effect sizes (SD = 0.5).
n_subjects = 20
n_scans = 500
TR = 2.0
onsets = np.arange(10, n_scans*TR, 15)
region_centers, radiuses, effect_sizes, snrs = zip(*[simulate_subject(5, 5, .5) for i in np.arange(n_subjects)])
ms_data = []  # Multiple subjects' data
for (region_center, radius, effect_size, snr) in zip(region_centers, radiuses, effect_sizes, snrs):
    ms_data.append(simulate_data([[onsets]] * 2,  # Same onsets for both regions, one condition
                                 nscans=n_scans,
                                 FWHM=5,
                                 fading=0.01,
                                 region_centers=region_center,
                                 region_radiuses=radius,
                                 effect_sizes=effect_size,
                                 SNR=snr))  # Per-subject SNR
np.save('/home/gdholla1/notebooks/ms_data.np', ms_data)
Now we write a function that fits a single-subject GLM with sandwich-based variance estimation.
ms_data = np.load('/home/gdholla1/notebooks/ms_data.np.npy')
def fit_sandwich_glm(data, onsets, q=25, c=None):
    if c is None:
        c = np.array([[1, 0, 0]])
    # Set up paradigm and design matrix
    paradigm = EventRelatedParadigm(['task'] * len(onsets), onsets)
    TR = 2.0
    frametimes = np.arange(0, data.shape[-1] * TR, TR)
    X, names = dm.dmtx_light(frametimes, paradigm, drift_model='polynomial',
                             hfcut=128, hrf_model='canonical')
    # Cut the data and design matrix into q equally-sized blocks
    cuts = np.linspace(0, data.shape[-1], q + 1).astype(int)
    data_cut = np.array([data[:, :, cuts[i]:cuts[i+1]] for i in np.arange(q)])
    X_cut = np.array([X[cuts[i]:cuts[i+1], :] for i in np.arange(q)])
    beta = np.zeros((X.shape[1],) + data.shape[:-1])  # (num_regressors, size_x, size_y)
# Fit GLM
predicted = np.zeros_like(data_cut)
residuals = np.zeros_like(data_cut)
for i, x in enumerate(X_cut):
        # einsum contracts the pseudo-inverse of the design with the data block,
        # summing over the time axis, to accumulate the per-block OLS betas
        beta += np.einsum('ij,klj', np.linalg.pinv(x.T.dot(x)).dot(x.T), data_cut[i])
beta /= q
for i, x in enumerate(X_cut):
# Now we can get predicted timecourses
predicted[i] = np.tensordot(beta, x, (0, 1))
# ... and residuals
residuals[i] = data_cut[i] - predicted[i]
# Estimate covariance residuals
W = np.zeros((residuals.shape[-1],) * 2 + data.shape[:-1]) # (length_block, length_block, width, height)
for residual in residuals:
W += np.einsum('...i, ...j->ij...', residual, residual) # do outer product over last two dimensions, broadcast the rest
W /= (q - 1)
# Give variance matrix
    V = np.zeros((X.shape[1],) * 2 + data.shape[:-1])  # (num_regressors, num_regressors, width, height)
for i, x in enumerate(X_cut):
sandwich1 = np.linalg.pinv(x.T.dot(x)).dot(x.T)
sandwich2 = x.dot(np.linalg.pinv(x.T.dot(x)))
# Apply first part sandwich
v = np.tensordot(sandwich1, W, axes=((1), (0)))
# Apply second part
v = np.tensordot(v, sandwich2, (1, 0))
# Roll axis to get correct shape
v = np.rollaxis(v, -1, 1)
V += v
# Correct for number of replications
V /= q
# Get t- and z-maps
dof = q - 1
    t = np.tensordot(c, beta, (1, 0)).squeeze()
    # t = c*beta / sqrt(c*V*c')
    t /= np.sqrt(np.tensordot(np.tensordot(c, V, (1, 0)), c.T, (1, 0)).squeeze())
z = sp.stats.norm.ppf(sp.stats.t(dof).cdf(t))
# Correct for numerical rounding errors
z[z == np.inf] = z[z != np.inf].max()
return beta, V, residuals, t, z, X_cut
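To make the block-wise sandwich recipe concrete, here is a small self-contained sanity check on a single synthetic time series (a sketch with a made-up design and true betas, not part of the original analysis); it mirrors formulas 7 and 8 below for a single voxel:
import numpy as np

rng = np.random.RandomState(0)
n, q = 500, 25                         # timepoints, number of blocks
X_toy = np.column_stack([np.sin(np.linspace(0, 20, n)), np.ones(n)])
y_toy = X_toy.dot([2.0, 1.0]) + rng.randn(n)  # true betas: 2 and 1

blocks = np.split(np.arange(n), q)     # q blocks of n/q timepoints each
betas_toy = [np.linalg.pinv(X_toy[b].T.dot(X_toy[b])).dot(X_toy[b].T).dot(y_toy[b]) for b in blocks]
beta_toy = np.mean(betas_toy, axis=0)  # block-averaged OLS estimate

# W: empirical covariance of the block residuals (formula 8, single voxel)
res = np.array([y_toy[b] - X_toy[b].dot(beta_toy) for b in blocks])
W_toy = res.T.dot(res) / (q - 1)
# V: the sandwich (formula 7), averaged over blocks
V_toy = np.zeros((2, 2))
for b in blocks:
    bread = np.linalg.pinv(X_toy[b].T.dot(X_toy[b])).dot(X_toy[b].T)
    V_toy += bread.dot(W_toy).dot(bread.T)
V_toy /= q
print beta_toy, np.sqrt(np.diag(V_toy))  # estimates and their standard errors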
Fit all GLMs
betas, Vs, residualss, ts, zs, design_matrices = zip(*[fit_sandwich_glm(d, onsets) for d in ms_data])
Show individual z-maps
plt.figure(figsize=(15, 10))
for i, z in enumerate(zs):
    plt.subplot(5, 4, i + 1)
plt.imshow(z, cmap=plt.cm.hot)
plt.title('Subject %d' % (i + 1))
plt.colorbar()
plt.tight_layout()
We save the data to NIfTI files so we can use them in FSL. We also need a mask and a text file indicating the degrees of freedom.
import nibabel as nb
# Degrees of freedom
dof = len(ms_data) - 1
np.savetxt('dof.txt', [dof], fmt='%d')
# Just take the contrast of only the first (task) parameter for now
for i in np.arange(len(ms_data)):
nb.save(nb.Nifti1Image(betas[i][0][..., np.newaxis], np.identity(4)), 'cope_%d.nii.gz' % i)
nb.save(nb.Nifti1Image(Vs[i][0, 0][..., np.newaxis], np.identity(4)), 'varcope_%d.nii.gz' % i)
# Make a simple mask
nb.save(nb.Nifti1Image(np.ones_like(betas[0][0]), np.identity(4)), 'mask.nii.gz')
For the level 2 analysis we use the standard mixed-effects model FLAME1 from the FSL package, and then apply cluster-based thresholding using Gaussian random field theory.
import nipype.interfaces.io as nio
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
from nipype.workflows.fmri.fsl.estimate import create_fixed_effects_flow
import os
# Fixed-effects flow from nipype, run in FLAME1 (mixed-effects) mode
level2 = create_fixed_effects_flow()
level2.inputs.flameo.run_mode = 'flame1'
level2.base_dir = '/home/gdholla1/workflow_folders/'
# Use the contrasts and variances from the GLM
level2.inputs.inputspec.copes = [os.path.abspath('cope_%d.nii.gz' % i) for i in np.arange(len(ms_data))]
level2.inputs.inputspec.varcopes = [os.path.abspath('varcope_%d.nii.gz' % i) for i in np.arange(len(ms_data))]
level2.inputs.inputspec.dof_files = [os.path.abspath('dof.txt')] * len(ms_data)
level2.inputs.l2model.num_copes = len(ms_data)
level2.inputs.flameo.mask_file = os.path.abspath('mask.nii.gz')
# Estimate smoothness for GRF
smooth_est = pe.Node(fsl.SmoothEstimate(), name='smooth_estimate')
smooth_est.inputs.mask_file = os.path.abspath('mask.nii.gz')
pickfirst = lambda x: x[0]
level2.connect(level2.get_node('flameo'), ('zstats', pickfirst), smooth_est, 'zstat_file')
cluster = pe.Node(fsl.Cluster(), name='cluster')
cluster.inputs.threshold = 2.3
cluster.inputs.out_threshold_file = 'z_simulation_threshold.nii.gz'
level2.connect(level2.get_node('flameo'), ('zstats', pickfirst), cluster, 'in_file')
level2.connect(smooth_est, 'dlh', cluster, 'dlh')
# Write data to datasink
ds = pe.Node(nio.DataSink(), name='datasink')
ds.inputs.base_directory = '/home/gdholla1/data/in_limbo'
level2.connect(level2.get_node('flameo'), ('zstats', pickfirst), ds, 'zstats_simulation')
level2.connect(cluster, 'threshold_file', ds, 'zstats_simulation.@out_threshold_file')
level2.run()
INFO:workflow:['check', 'execution', 'logging']
INFO:workflow:Running serially.
INFO:workflow:Executing node copemerge in dir: /home/gdholla1/workflow_folders/fixedfx/copemerge
INFO:workflow:Executing node _copemerge0 in dir: /home/gdholla1/workflow_folders/fixedfx/copemerge/mapflow/_copemerge0
INFO:workflow:Running: fslmerge -t cope_0_merged.nii.gz /home/gdholla1/notebooks/cope_0.nii.gz /home/gdholla1/notebooks/cope_1.nii.gz /home/gdholla1/notebooks/cope_2.nii.gz /home/gdholla1/notebooks/cope_3.nii.gz /home/gdholla1/notebooks/cope_4.nii.gz /home/gdholla1/notebooks/cope_5.nii.gz /home/gdholla1/notebooks/cope_6.nii.gz /home/gdholla1/notebooks/cope_7.nii.gz /home/gdholla1/notebooks/cope_8.nii.gz /home/gdholla1/notebooks/cope_9.nii.gz /home/gdholla1/notebooks/cope_10.nii.gz /home/gdholla1/notebooks/cope_11.nii.gz /home/gdholla1/notebooks/cope_12.nii.gz /home/gdholla1/notebooks/cope_13.nii.gz /home/gdholla1/notebooks/cope_14.nii.gz /home/gdholla1/notebooks/cope_15.nii.gz /home/gdholla1/notebooks/cope_16.nii.gz /home/gdholla1/notebooks/cope_17.nii.gz /home/gdholla1/notebooks/cope_18.nii.gz /home/gdholla1/notebooks/cope_19.nii.gz
INFO:workflow:Executing node gendofvolume in dir: /home/gdholla1/workflow_folders/fixedfx/gendofvolume
INFO:workflow:Executing node varcopemerge in dir: /home/gdholla1/workflow_folders/fixedfx/varcopemerge
INFO:workflow:Executing node _varcopemerge0 in dir: /home/gdholla1/workflow_folders/fixedfx/varcopemerge/mapflow/_varcopemerge0
INFO:workflow:Running: fslmerge -t varcope_0_merged.nii.gz /home/gdholla1/notebooks/varcope_0.nii.gz /home/gdholla1/notebooks/varcope_1.nii.gz /home/gdholla1/notebooks/varcope_2.nii.gz /home/gdholla1/notebooks/varcope_3.nii.gz /home/gdholla1/notebooks/varcope_4.nii.gz /home/gdholla1/notebooks/varcope_5.nii.gz /home/gdholla1/notebooks/varcope_6.nii.gz /home/gdholla1/notebooks/varcope_7.nii.gz /home/gdholla1/notebooks/varcope_8.nii.gz /home/gdholla1/notebooks/varcope_9.nii.gz /home/gdholla1/notebooks/varcope_10.nii.gz /home/gdholla1/notebooks/varcope_11.nii.gz /home/gdholla1/notebooks/varcope_12.nii.gz /home/gdholla1/notebooks/varcope_13.nii.gz /home/gdholla1/notebooks/varcope_14.nii.gz /home/gdholla1/notebooks/varcope_15.nii.gz /home/gdholla1/notebooks/varcope_16.nii.gz /home/gdholla1/notebooks/varcope_17.nii.gz /home/gdholla1/notebooks/varcope_18.nii.gz /home/gdholla1/notebooks/varcope_19.nii.gz
INFO:workflow:Executing node l2model in dir: /home/gdholla1/workflow_folders/fixedfx/l2model
INFO:workflow:Executing node flameo in dir: /home/gdholla1/workflow_folders/fixedfx/flameo
INFO:workflow:Executing node _flameo0 in dir: /home/gdholla1/workflow_folders/fixedfx/flameo/mapflow/_flameo0
INFO:workflow:Running: flameo --copefile=/home/gdholla1/workflow_folders/fixedfx/copemerge/mapflow/_copemerge0/cope_0_merged.nii.gz --covsplitfile=/home/gdholla1/workflow_folders/fixedfx/l2model/design.grp --designfile=/home/gdholla1/workflow_folders/fixedfx/l2model/design.mat --dofvarcopefile=/home/gdholla1/workflow_folders/fixedfx/gendofvolume/dof_file.nii.gz --ld=stats --maskfile=/home/gdholla1/notebooks/mask.nii.gz --runmode=flame1 --tcontrastsfile=/home/gdholla1/workflow_folders/fixedfx/l2model/design.con --varcopefile=/home/gdholla1/workflow_folders/fixedfx/varcopemerge/mapflow/_varcopemerge0/varcope_0_merged.nii.gz
INFO:interface:stdout 2014-11-06T10:47:26.409426:Log directory is: stats
INFO:interface:stdout 2014-11-06T10:47:26.416408:Setting up:
INFO:interface:stdout 2014-11-06T10:47:26.444181:ntptsing=20.000000
INFO:interface:stdout 2014-11-06T10:47:26.444181:evs_group=1.000000
INFO:interface:stdout 2014-11-06T10:47:26.445541:No f contrasts
INFO:interface:stdout 2014-11-06T10:47:26.445869:nevs=1
INFO:interface:stdout 2014-11-06T10:47:26.445869:ntpts=20
INFO:interface:stdout 2014-11-06T10:47:26.445869:ngs=1
INFO:interface:stdout 2014-11-06T10:47:26.445869:nvoxels=6400
INFO:interface:stdout 2014-11-06T10:47:26.447085:Running:
INFO:interface:stdout 2014-11-06T10:47:26.447085:nmaskvoxels=6400
INFO:interface:stdout 2014-11-06T10:47:27.733692: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
INFO:interface:stdout 2014-11-06T10:47:27.734713:nmaskvoxels=6400
INFO:interface:stdout 2014-11-06T10:47:27.734713:Saving results
INFO:interface:stdout 2014-11-06T10:47:27.814960:Log directory was: stats
INFO:workflow:Executing node smooth_estimate in dir: /home/gdholla1/workflow_folders/fixedfx/smooth_estimate
INFO:workflow:Running: smoothest --mask=/home/gdholla1/notebooks/mask.nii.gz --zstat=/home/gdholla1/workflow_folders/fixedfx/flameo/mapflow/_flameo0/stats/zstat1.nii.gz
INFO:interface:stdout 2014-11-06T10:47:28.104103:DLH 0.0118104
INFO:interface:stdout 2014-11-06T10:47:28.104103:VOLUME 6400
INFO:interface:stdout 2014-11-06T10:47:28.104103:RESELS 234.757
INFO:workflow:Executing node cluster in dir: /home/gdholla1/workflow_folders/fixedfx/cluster
INFO:workflow:Running: cluster --dlh=0.0118104000 --in=/home/gdholla1/workflow_folders/fixedfx/flameo/mapflow/_flameo0/stats/zstat1.nii.gz --othresh=z_simulation_threshold.nii.gz --thresh=2.3000000000
INFO:interface:stdout 2014-11-06T10:47:28.333380:Cluster Index  Voxels  MAX   MAX X (vox)  MAX Y (vox)  MAX Z (vox)  COG X (vox)  COG Y (vox)  COG Z (vox)
INFO:interface:stdout 2014-11-06T10:47:28.333380:4              998     5.55  20           25           0            22.6         21.9         0
INFO:interface:stdout 2014-11-06T10:47:28.334530:3              13      2.67  52           55           0            51.9         55.1         0
INFO:interface:stdout 2014-11-06T10:47:28.334530:2              5       2.37  56           55           0            56.2         55.4         0
INFO:interface:stdout 2014-11-06T10:47:28.334530:1              3       2.34  58           58           0            57.7         58.7         0
INFO:workflow:Executing node datasink in dir: /home/gdholla1/workflow_folders/fixedfx/datasink
<networkx.classes.digraph.DiGraph at 0x695b7d0>
Let's see what the level 2 z-map and the thresholded mask look like, and find the comparison voxel.
fig = plt.figure(figsize=(14,6))
plt.subplot(121)
z = nb.load('/home/gdholla1/data/in_limbo/zstats_simulation/_flameo0/zstat1.nii.gz').get_data().squeeze()
plt.imshow(z, cmap=plt.cm.hot, vmin=z.min(), vmax=z.max())
plt.grid(False)
plt.title('raw Level 2 z-map')
plt.subplot(122)
z_threshold = nb.load('/home/gdholla1/data/in_limbo/zstats_simulation/z_simulation_threshold.nii.gz').get_data().squeeze()
im = plt.imshow(z_threshold, cmap=plt.cm.hot, vmin=z.min(), vmax=z.max())
plt.grid(False)
# Get the comparison voxel: the least significant voxel that still survives thresholding
comparison_index = np.unravel_index(np.ma.argmin(np.ma.masked_array(z_threshold, z_threshold == 0)), z.shape)
plt.scatter(*comparison_index[::-1], marker='o', s=500, c='r', lw=5, facecolors='none', label='comparison voxel')
plt.xlim(0, 80)
plt.ylim(80, 0)
plt.title('Thresholded level 2 z-map (z>2.3, cluster-based correction)')
# plt.colorbar()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
<matplotlib.colorbar.Colorbar instance at 0xb81d6c8>
Now we need to find the variance of the in-limbo t-value estimator for every voxel, so we can set up a weighted least-squares GLM using equation 13, where the denominator is the standard error of the difference between the two contrast estimates:
$W_{ill} = \frac{1}{\sqrt{c\hat{V}_{il} c' + c \hat{V}_{vl} c' - 2 c \hat{V}_{ivl} c'}}$
For this we do the same as in the single-subject case: we use the sandwich estimator to get the covariance between the comparison voxel and every other voxel, using
$\hat{V}_{ij} = \frac{1}{q} \sum_{k=1}^{q} (X_k'X_k)^{-1}X_k' W_{ij} X_k(X_k'X_k)^{-1},$ (formula 7)
with $W_{ij} = \frac{1}{q - 1} \sum_{k=1}^{q} \hat{r}_{ik}\hat{r}'_{jk}$ (formula 8) .
def get_covariance_with_comparison_voxel(residuals, comparison_index, design_matrices):
# Get number of replications
q = residuals.shape[0]
# Get the residuals in the comparison voxel
    residuals_comparison_voxel = residuals[(slice(None),) + comparison_index]  # (q, block_length)
# Allocate memory for W_ij matrix
W_ij = np.zeros((residuals.shape[-1],) * 2 + residuals.shape[1:-1]) # (length_block, length_block, width, height)
# Create covariance matrices for every residual
for comp_residual, residual in zip(residuals_comparison_voxel, residuals):
W_ij += np.einsum('...i, ...j->ij...', comp_residual, residual) # do outer product over last two dimensions, broadcast the rest
# Normalize
W_ij /= (q - 1)
# Allocate memory for the covariance matrix of every voxel with the comparison voxel
    V_ij = np.zeros((design_matrices.shape[-1],) * 2 + residuals.shape[1:-1])  # (num_regressors, num_regressors, width, height)
for i, x in enumerate(design_matrices):
sandwich1 = np.linalg.pinv(x.T.dot(x)).dot(x.T)
sandwich2 = x.dot(np.linalg.pinv(x.T.dot(x)))
# Apply first part sandwich
v = np.tensordot(sandwich1, W_ij, axes=((1), (0)))
# Apply second part
v = np.tensordot(v, sandwich2, (1, 0))
# Roll axis to get correct shape
v = np.rollaxis(v, -1, 1)
V_ij += v
V_ij /= q
return V_ij
Get $V_{ij}$:
$\hat{V}_{ij} = \frac{1}{q} \sum_{k=1}^{q} (X_k'X_k)^{-1}X_k' W_{ij} X_k(X_k'X_k)^{-1},$ (formula 7)
V_ijs = [get_covariance_with_comparison_voxel(residuals, comparison_index, design_matrix) for residuals, design_matrix in zip(residualss, design_matrices)]
V_ijs = np.array(V_ijs)
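A quick shape check (illustrative; the regressor count comes from the task regressor plus the drift terms):
print V_ijs.shape  # expected: (n_subjects, n_regressors, n_regressors, width, height), e.g. (20, 3, 3, 80, 80)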
$W_{ill} = \frac{1}{\sqrt{c\hat{V}_{il} c' + c \hat{V}_{vl} c' - 2 c \hat{V}_{ivl} c'}} = \frac{1}{\sqrt{cV_1c' + cV_2c' - 2cV_3c'}}$
Vs = np.array(Vs)
c = np.array([[1, 0, 0]])
V1 = np.tensordot(np.tensordot(c, Vs, (1,1)).squeeze(), c.T, (1, 0))
V2 = np.tensordot(np.tensordot(c, Vs[(Ellipsis, ) + comparison_index], (1,1)), c.T, (-1, 0))
V3 = np.tensordot(np.tensordot(c, V_ijs, (1,1)).squeeze(), c.T, (1, 0))
W_ill = np.zeros((Vs.shape[0], Vs.shape[0]) + ms_data.shape[1:-1])  # Allocate a diagonal weight matrix for every voxel
W_ill[np.arange(Vs.shape[0]), np.arange(Vs.shape[0]), ...] = 1 / np.sqrt((V1.squeeze().T + V2.squeeze() - 2 * V3.squeeze().T).T)  # Squeeze and transpose to facilitate broadcasting
-c:9: RuntimeWarning: divide by zero encountered in divide
Now we set up a matrix $Z$ with the difference in contrast estimates between the comparison voxel and every voxel $i$:
$Z_{il} = c\beta_{vl} - c\beta_{il}$
Z = (np.array(betas)[(Ellipsis, 0) + comparison_index] - np.array(betas)[:, 0, ...].T).T  # Transpose to facilitate broadcasting
Now we can use a weighted GLM:
$\beta_{wls} = (X'WX)^{-1}X'WZ$
$V_{wls} = \hat{\sigma}^2 (X'WX)^{-1}$
where
$\hat{\sigma}^2 = (Y - \hat{Y})'W(Y - \hat{Y}) / (n - 1)$
with $Y - \hat{Y}$ the residuals and $n$ the number of subjects.
In the case where $X$ is a 't-test' design, i.e., just a single column of ones, this simplifies to a weighted mean:
$\beta_{wls} = \sum_{i=1}^{n} \frac{w_i}{\sum_{j=1}^{n} w_j} Y_i$
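A tiny numeric check of this simplification (a sketch with made-up weights and data, not from the original analysis):
w_toy = np.array([1., 2., 3.])
Y_toy = np.array([10., 20., 30.])
G_toy = np.ones((3, 1))
W_toy = np.diag(w_toy)
# Full matrix WLS estimate...
beta_matrix = np.linalg.pinv(G_toy.T.dot(W_toy).dot(G_toy)).dot(G_toy.T).dot(W_toy).dot(Y_toy)
# ... equals the weighted mean
beta_mean = (w_toy / w_toy.sum()).dot(Y_toy)
print beta_matrix, beta_mean  # both ~23.33: (1*10 + 2*20 + 3*30) / 6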
Now check the full computation for a single voxel of our data, which is easier:
W = W_ill[:, :, 20, 20]
Y = Z[:, 20, 20]
G = np.ones((W_ill.shape[0], 1))
import statsmodels.api as sm
from numpy.linalg import pinv
beta = pinv(G.T.dot(W).dot(G)).dot(G.T).dot(W).dot(Y)
Y_ = G.dot(beta)
s2 = (Y - Y_).T.dot(W).dot(Y - Y_) / (Y.shape[0] - 1)
wls_V = s2 * pinv(G.T.dot(W).dot(G))
print beta, wls_V
wls_fit = sm.WLS(Y, G, np.diag(W)).fit()
print wls_fit.params, wls_fit.cov_params()
[-53.02997352] [[ 52.24131969]] [-53.02997352] [[ 52.24131969]]
Now we translate this to the full (>2D) case using einsum:
beta = np.einsum('i...,ij...->...i', G, W_ill)     # G'W for every voxel
beta = 1. / np.einsum('...i,i...->...', beta, G)   # (G'WG)^-1
beta = np.einsum('...,i...->...i', beta, G)        # (G'WG)^-1 G'
beta = np.einsum('...i,ij...->j...', beta, W_ill)  # (G'WG)^-1 G'W
beta = np.einsum('i...,i...->...', beta, Z)        # beta_wls = (G'WG)^-1 G'WZ
residuals = (Z - beta)
# sigma^2 = residuals' W residuals / (n - 1)
s2 = np.einsum('i...,ij...->...i', residuals, W_ill)
s2 = np.einsum('...i,i...->...', s2, residuals)
s2 /= (residuals.shape[0] - 1)
wls_V = 1. / W_ill.sum(0).sum(0)  # (G'WG)^-1, since G is a column of ones
wls_V *= s2
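As a spot check (illustrative), the vectorized estimates at voxel (20, 20) should reproduce the single-voxel statsmodels fit from above:
print beta[20, 20], wls_fit.params         # point estimates should match
print wls_V[20, 20], wls_fit.cov_params()  # variances should match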
Now we can make a t-map of the in-limbo test:
t_in_limbo = beta / np.sqrt(wls_V)
plt.imshow(t_in_limbo, cmap=plt.cm.hot)
plt.colorbar()
plt.title('t-values for in limbo effect')
<matplotlib.text.Text at 0x7a92110>
Get the $\alpha = .05$ threshold:
threshold = sp.stats.t(len(ms_data) - 1).ppf(0.95)
And now we arrive at the final SPMs:
plt.figure(figsize=(16,6))
cmap = plt.cm.hot
cmap.set_bad('black')
plt.subplot(131)
plt.imshow(z, cmap=cmap, vmin=z.min(), vmax=z.max())
plt.grid(False)
plt.title('Unthresholded z-map', fontsize=20)
plt.subplot(132)
plt.imshow(np.ma.masked_array(z_threshold, z_threshold == 0), cmap=cmap, vmin=z.min(), vmax=z.max())
plt.grid(False)
plt.title('Thresholded z-map', fontsize=20)
plt.subplot(133)
# In-limbo voxels: not significant themselves, but also not significantly
# different from the comparison voxel; everything else is masked out
plt.imshow(np.ma.masked_where((z_threshold != 0) | ((z_threshold == 0) & (t_in_limbo > threshold)), z),
           cmap=cmap,
           vmin=z.min(), vmax=z.max())
plt.title('In limbo regions', fontsize=20)
plt.grid(False)