def selection(p, s):
    """One generation of haploid selection.

    Given the current mutant frequency ``p`` and selection coefficient
    ``s``, return the frequency in the next generation: the mutant's
    fitness contribution p*(1+s) normalized by the mean fitness.
    """
    mutant_weight = p * (1 + s)
    mean_fitness = mutant_weight + (1 - p)
    return mutant_weight / mean_fitness
# Iterate one-generation selection starting from a rare mutant (p0)
# until the mutant is nearly fixed (frequency >= 1 - p0).
s = 0.01
p0 = 0.01
freq = p0
pt = [freq]
while freq < 1 - p0:
    freq = selection(freq, s)
    pt.append(freq)
# Convert the trajectory to an array and plot mutant frequency vs. generation.
# (array/plot/xlabel/ylabel come from the notebook's pylab star import.)
pt = array(pt)
plot(pt)
xlabel('t')
ylabel('p(t)');
# Log-odds of the mutant frequency; under constant selection this is a
# straight line with slope log(1+s) ≈ s.
plot(log(pt/(1-pt)))
xlabel('t')
ylabel(r'$\log{(\frac{p}{1-p})}$')
<matplotlib.text.Text at 0x98f4c50>
# Discrete slope of the log-odds trajectory; expected to be ~log(1+s)
# at every generation (constant selection).
gradient(log(pt/(1-pt)))
[<matplotlib.lines.Line2D at 0xa4dd4e0>]
# Sanity checks of the analytic constants used later in the text.
'%.3f' % (log(2)/2.)
'0.347'
'%.3f' % (-0.1 * log(2))
'-0.069'
$\Rightarrow s=r_m-r_w$ is constant in time.
Selection is frequency and density independent if $i_w=i_m=1$ and $\lambda_w ^{1/f(K_w)}=\lambda_m^{1/f(K_m)}$:
$$ M' = M \cdot \lambda_m ^{1-\frac{M+N}{K_m}} \\ N' = N \cdot \lambda_w ^{1-\frac{M+N}{K_w}} \\ $$logistic_competition = lambda m,n: (lam_m**(1-(m+n)/K_m) * i_m**(n/(n+m)) * m, lam_w**(1-(m+n)/K_w) * i_m**(m/(n+m)) * n)
def logistic_isolation(m, n):
    """One discrete logistic growth step with NO competition.

    Each strain is limited only by its own density relative to its own
    carrying capacity: m' = lam_m**(1 - m/K_m) * m, and likewise for n.
    (Converted from a named lambda to a def per PEP 8; behavior unchanged.)
    """
    return (lam_m ** (1 - (m) / K_m) * m, lam_w ** (1 - (n) / K_w) * n)
# Simulate 24 steps of isolated logistic growth from equal inocula.
# M tracks the mutant (lam_m), N the wild type (lam_w).
M,N=[],[]
m,n = 1e6,1e6
# Per-step growth rates; the 1.5 factor scales both exponents equally.
lam_m,lam_w = exp(1.5*0.327),exp(1.5*0.311)
K_w = 128*1e6
# Shared carrying capacity (the commented factor would equalize per-capita
# growth at K instead).
K_m = K_w #* log(lam_m) / log(lam_w)
i_m,i_w = 1,1
for _ in range(24):
    m,n = logistic_isolation(m,n)
    M.append(m)
    N.append(n)
M = array(M)
N = array(N)
# Cell-count trajectories of both strains.
# Fix: the legend labels were swapped — throughout this notebook N is the
# wild type (grows with lam_w) and M is the mutant (lam_m); the later
# plots (gradient and serial-transfer figures) label them that way.
plot(N, color='b', label="wt")
plot(M, color='g', label="mut")
#plot(M+N, label="tot")
xticks(range(0,24,2))
xlim(0,16)
legend(loc=2)
ylabel('# cells')
xlabel('time (hours)');
<matplotlib.text.Text at 0x205439e8>
# Absolute growth rate (discrete derivative of cell counts) per strain
# and for the total population.
plot(gradient(N), label="wt")
plot(gradient(M), label="mut")
plot(gradient(M+N), label="tot")
#yscale('log')
grid(False, 'minor')
legend(loc=2)
ylabel('growth rate')
xlabel('time')
# Mutant frequency p and its log-odds q; p[1:] drops the first sample
# (both strains start at exactly equal counts, so q[0] would be 0).
p = M/(M+N)
p = p[1:]
q = log(p/(1-p))
fig,ax = subplots(1,2)
ax[0].plot(q)
ax[0].set_title('log(p/(1-p))')
# The slope of q is the realized selection coefficient per time step.
ax[1].plot(gradient(q))
ax[1].set_title('d log(p/(1-p))/dt')
fig.tight_layout();
# Two analytic predictions for the selection coefficient:
# s_T1 — per unit time (difference of the Malthusian rates);
# s_T2 — rescaled per wild-type doubling via the log(2)/log(lam_w) factor.
s_T1 = log(lam_m/lam_w)
s_T2 = log(2) * log(lam_m/lam_w) / log(lam_w)
print s_T1, s_T2
0.016 0.0356603051092
We let them grow like the above, then sample, then let them grow again, until one wins.
# Serial-transfer competition: 30 cycles of 24 h of growth in co-culture,
# each followed by dilution back to 1e6 total cells (proportions preserved).
M,N=[],[]
m,n = 1e6,1e6
lam_m,lam_w = exp(0.327),exp(0.311)
K_w = 128*1e6
K_m = K_w #* log(lam_m) / log(lam_w)
i_m,i_w = 1,1
for _ in range(30):
    for _ in range(24):
        # Fix: the original called `logistic`, which is not defined anywhere.
        # The competition model is intended — the strains share one culture,
        # and i_m/i_w (set just above) are used only by logistic_competition.
        m,n = logistic_competition(m,n)
        M.append(m)
        N.append(n)
    # Dilute to 1e6 total cells at the end of each 24 h cycle.
    m,n = 1e6*m/float(m+n), 1e6*n/float(m+n)
    #m,n = multinomial(200, [m/(m+n), n/(m+n)])
M = array(M)
N = array(N)
# Sawtooth trajectories: growth within each cycle, drop at each dilution.
plot(N, label="wt")
plot(M, label="mut")
#plot(M+N, label="tot")
#yscale('log')
grid(False, 'minor')
legend(loc=2)
ylabel('# cells')
xlabel('time');
<matplotlib.text.Text at 0x1315a8d0>
from scipy.stats import linregress
# Estimate the realized selection coefficient as the slope of the
# log-odds trajectory q(t) via least-squares regression.
p = M/(M+N)
p = p[1:]
q = log(p/(1-p))
slope, intercept, r_value, p_value, std_err = linregress(range(len(q)), q)
# Fitted line for overplotting on the q panel.
abline = [intercept + slope*i for i in range(len(q))]
print 's =', slope
fig,ax = subplots(1,3, figsize=(10,5), sharex=True)
ax[0].plot(p)
ax[0].set_title('p')
# Ticks every 5*24 time steps, labelled in transfer cycles (24 h each);
# NOTE(review): relies on Python 2 integer division in len(p)/24.
ax[0].set_xticks(range(0, len(p)+5*24, 5*24))
ax[0].set_xticklabels(range(0, 2+len(p)/24, 5))
ax[1].plot(q)
ax[1].plot(abline, ls='--')
ax[1].set_title('log(p/(1-p))')
ax[2].plot(gradient(q))
ax[2].set_title('d log(p/(1-p))/dt')
# Dashed reference: the regression slope should match the mean gradient.
ax[2].axhline(y=slope, ls='--',color='k')
fig.tight_layout();
s = 0.00986516222219