Let $\Omega =(0,\pi)\times (0,2\pi)$ and let $\psi:\Omega\to \mathbb{R}$ be defined by $$\psi(x,t)=\sum_{k^2-j^2=-\lambda_0}a_{kj}\sin(kx)\sin(jt)+b_{kj}\sin(kx)\cos(jt),$$ where $\lambda_0\in \mathbb Z\setminus\{0\}$ is fixed and $\int_\Omega \psi^2 = 1$. Let $\alpha,\beta>0$ and $E_0=\{(x,t)\in\Omega\,:\,|\psi(x,t)|<\epsilon^\beta\}$. Then, if $0<\alpha<\beta<1$, there are $M,\epsilon_0>0$ such that if $\epsilon\in(0,\epsilon_0)$ then $\mu(E_0)<M\epsilon^\alpha$. If $\alpha$ is taken small enough, $M$ can be 1. If $\alpha>\beta$ there is no such $M$. This conjecture can obviously be extended to a more general setting, but this is the case we are interested in.
#Instruction for inline graphics
%matplotlib inline
#Plotting library
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#Basic mathematical functions library
import numpy as np
#Numerical analysis library: integration functions
from scipy.integrate import quad
from scipy.integrate import dblquad
We are going to define a trigonometric polynomial corresponding to $-\lambda_0=15$. The possible values for the pairs $(k,j)$ are $(4,1)$ and $(8,7)$.
def psi(x, t):
    """Evaluate the trigonometric polynomial for $-\\lambda_0 = 15$.

    The admissible pairs (k, j) with k^2 - j^2 = 15 are (4, 1) and (8, 7);
    the coefficients are chosen so that the integral of psi**2 over
    (0, pi) x (0, 2*pi) equals 1 (checked numerically below).
    Works on scalars and on numpy arrays via broadcasting.
    """
    return (0.434777248914389 * np.sin(4 * x) * np.sin(t)
            - 0.1 * np.sin(4 * x) * np.cos(t)
            + 0.05 * np.sin(8 * x) * np.sin(7 * t)
            - (1 / 30) * np.sin(8 * x) * np.cos(7 * t))
# Numerical check of the L^2 normalisation of psi. NOTE: scipy's dblquad
# integrates f(y, x) with its FIRST argument as the inner variable, so here
# x runs over (0, pi) (inner limits) and t over (0, 2*pi) (outer limits),
# matching Omega = (0, pi) x (0, 2*pi). The result should be 1.
dblquad(lambda x, t: (psi(x,t))**2,0,2*np.pi, lambda x: 0, lambda x: np.pi)[0]
1.0000000000000007
It is clear that the chosen $\psi$ satisfies the conditions of the conjecture.
def area(eps, beta, N):
    """Riemann-sum approximation of mu(E_0), the measure of the sublevel set
    {(x, t) in (0, pi) x (0, 2*pi) : |psi(x, t)| < eps**beta}.

    Parameters
    ----------
    eps : float -- the epsilon of the conjecture.
    beta : float -- the exponent defining the threshold eps**beta.
    N : int -- number of subdivisions in x; t uses 2*N so cells are
        pi/N by pi/N, sampled at their lower-left corners as before.

    Improvements over the original: the O(N^2) Python double loop is
    vectorised with numpy broadcasting, and the area is computed as
    count * cell_area instead of accumulating Dx*Dy one cell at a time
    (which also avoids building up floating-point rounding error).
    """
    dx = np.pi / N
    dt = 2 * np.pi / (2 * N)
    xs = np.arange(N) * dx          # same sample points i*Dx as the loop version
    ts = np.arange(2 * N) * dt      # same sample points j*Dy
    inside = np.abs(psi(xs[:, None], ts[None, :])) < eps ** beta
    return np.count_nonzero(inside) * dx * dt
# Number of grid subdivisions N handed to area() (the grid is N x 2N).
max_area_steps = 300;
# Number of epsilon sample points used for the plots.
max_plot_steps = 50;
# The epsilon_0 of the conjecture: epsilon is sampled in [0, epsilon_0].
epsilon_not = 0.01
# Grid of epsilon values at which mu(E_0) and the estimator are evaluated.
x = np.linspace(0,epsilon_not,max_plot_steps);
# y is the vector of the areas of E_0
def area_vector(beta):
    """Return the numpy vector of mu(E_0)(epsilon) for every epsilon in the
    module-level grid `x`, using `max_area_steps` grid subdivisions.

    The original allocated a linspace only to overwrite every entry and
    filled it with an index loop; a comprehension over `x` is equivalent.
    """
    return np.array([area(eps, beta, max_area_steps) for eps in x])
# z is the vector of the estimators $M\epsilon^\alpha$
def estimator(alpha, M):
    """Return the numpy vector M * epsilon**alpha evaluated elementwise on
    the module-level grid `x`.

    The original allocated a linspace only to overwrite it in an index
    loop; the vectorised expression performs the same elementwise
    operations and returns the same ndarray.
    """
    return M * x ** alpha
In the next code we graph, for different values of $\alpha$, $\beta$ and $M$, the curves $\mu(E_0)$ and $M\epsilon^\alpha$ as functions of $\epsilon$.
# Compare mu(E_0) with the candidate bound M*epsilon**alpha in three cases
# with 0 < alpha < beta, where the conjectured estimate should hold.
# The original repeated the same four plotting lines for each of the three
# axes; a loop over the (alpha, beta, M) triples removes the duplication.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig, axes = plt.subplots(1, 3, sharey=False, figsize=(20, 5))
cases = [(0.2, 0.5, 300), (0.001, 1, 1), (0.3, 0.7, 20)]
for ax, (alpha, beta, M) in zip(axes, cases):
    ax.plot(x, area_vector(beta), label="$\\mu(E_0)$")
    ax.plot(x, estimator(alpha, M), label="$M\\epsilon^\\alpha$")
    ax.set_title("$\\alpha=$" + str(alpha) + ", $\\beta=$" + str(beta) + " y $M=$" + str(M), fontsize=25)
    ax.set_xlabel("$\\epsilon$", fontsize=20)
    ax.legend()
plt.show()
In the preceding graphics the estimate $M\epsilon^\alpha$ is above the graph of $\mu(E_0)$ for small enough $\epsilon$. You can change the values of $\alpha$, $\beta$ and $M$ to test the conjecture.
In the case $\alpha>\beta$ there is no $M$ such that $\mu(E_0)\leq M\epsilon^\alpha$. In the next code we graph this situation.
# The failing regime alpha > beta: the same (alpha, beta) = (0.7, 0.4) with
# increasingly large M — no constant makes the bound hold near epsilon = 0.
# The original repeated the same four plotting lines for each of the three
# axes; a loop over the (alpha, beta, M) triples removes the duplication.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig, axes = plt.subplots(1, 3, sharey=False, figsize=(20, 5))
cases = [(0.7, 0.4, 1), (0.7, 0.4, 50), (0.7, 0.4, 1000)]
for ax, (alpha, beta, M) in zip(axes, cases):
    ax.plot(x, area_vector(beta), label="$\\mu(E_0)$")
    ax.plot(x, estimator(alpha, M), label="$M\\epsilon^\\alpha$")
    ax.set_title("$\\alpha=$" + str(alpha) + ", $\\beta=$" + str(beta) + " y $M=$" + str(M), fontsize=25)
    ax.set_xlabel("$\\epsilon$", fontsize=20)
    ax.legend()
plt.show()
Apparently the third graphic above satisfies the inequality $\mu(E_0)<M\epsilon^\alpha$, but if we look closer we can see that this is false.
# Zoom in near the origin: shrink epsilon_0 by a factor of 10 and rebuild
# the epsilon grid to show that the M = 1000 case also fails for alpha > beta.
epsilon_not = 0.001
alpha3 = 0.7
beta3 = 0.4
M3 = 1000
x = np.linspace(0, epsilon_not, max_plot_steps)
# NOTE: area_vector and estimator look up the global `x` at call time, so
# the original's verbatim redefinition of both functions here was redundant
# and has been removed — the existing definitions pick up the new grid.
plt.plot(x, area_vector(beta3), label="$\\mu(E_0)$")
plt.plot(x, estimator(alpha3, M3), label="$M\\epsilon^\\alpha$")
plt.title("$\\alpha=$" + str(alpha3) + ", $\\beta=$" + str(beta3) + " y $M=$" + str(M3), fontsize=25)
plt.xlabel("$\\epsilon$", fontsize=20)
plt.legend()
plt.show()