This notebook aims to simulate the auditory environment of a racquetball court in order to answer this stackexchange question by Max Rabin.
We aim to solve the wave equation: $$ \frac{\partial^2 p}{\partial t^2} = c^2 \nabla^2 p $$ where $p$ denotes the overpressure, and $c$ the speed of sound, all inside a 2D box of size 20' by 40' (the official racquetball court dimensions).
This gives us a partial differential equation to solve, subject to boundary conditions that are supposed to represent the reflecting walls, for which we will use
$$ \nabla p \cdot \hat n = - c \eta \frac{\partial p}{\partial t} $$ where the left-hand side denotes the normal component of the gradient of our pressure field at a wall boundary, and the right-hand side is proportional to the time derivative of the pressure field at the wall.
# imports
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.linalg as la
from scipy.interpolate import RectBivariateSpline
import numpy.polynomial.chebyshev as npcheb
import os, sys
from time import time
import base64
from IPython.display import clear_output, HTML, Audio, display, Image
from itertools import count
%matplotlib inline
# Constants
# Speed of sound in simulation units: x spans [-1, 1] (2 units = 20 ft,
# one court width), so 1126 ft/s ~ 112.6 units/s = 2*56.3.
c = 2*56.3 # in racquetball widths / sec
eta = 1e-2 # our impedance parameter (wall absorption; 0 would be perfectly rigid)
We will do the simulation by representing our pressure field on a Chebyshev grid for increased accuracy.
This means the samples of our field are unequally spaced along the x and z directions, but this gives much better convergence.
In order to do so, we need to know what the sample points are and how to take derivatives on this lattice, for which we use the following utility function
def chebdiff(N):
    """ Return the sampling points and differentiation matrix for a
    Chebyshev polynomial interpolant of the specified degree.

    Parameters
    ----------
    N : int
        Polynomial degree; the grid has N+1 points.

    Returns
    -------
    x : ndarray, shape (N+1,)
        Chebyshev-Gauss-Lobatto points on [-1, 1] (ascending).
    D : ndarray, shape (N+1, N+1)
        Spectral differentiation matrix: D.dot(f(x)) approximates f'(x)
        (exact for polynomials of degree <= N).  Follows Trefethen's cheb.m.
    """
    if N == 0:
        # Degree-0 interpolant is constant: one point, zero derivative.
        # Return a (x, D) tuple so callers can always unpack the result
        # (the old code returned a bare float here, breaking `x, D = ...`).
        return np.zeros(1), np.zeros((1, 1))
    x = npcheb.chebpts2(N+1)
    # Barycentric-style weights: 2 at the endpoints, 1 in the interior,
    # with alternating signs.  Named w so as not to shadow the module-level
    # speed of sound c.
    w = (np.r_[2, np.ones(N-1), 2])*(-1)**(np.arange(N+1))
    dX = x[:,None] - x[None,:]
    # Off-diagonal entries; adding the identity keeps the diagonal division
    # finite (those entries are overwritten next).
    D = ( w[:,None]*(1./w[None,:]) )/( dX + np.eye(N+1) )
    # Diagonal = negative row sums, so D annihilates constants exactly.
    D = D - np.diag(D.sum(1))
    return x, D
Now we proceed to set up the simulation with some chosen parameters and initial conditions.
# Simulation setup
# Number of Chebyshev sample points along each axis.
Nx = Nz = 64
ptsx, Dx = chebdiff(Nx-1)
ptsz, Dz = chebdiff(Nz-1)
# x stays on [-1, 1] (one court width); stretch z to [0, 4] (two widths = 40').
ptsz = ptsz*2 + 2 # scale the z points to be from 0 to 4
Dz = Dz/2 # scale the derivative matrix to match this scaling (chain rule)
# compute second derivative matrices
Dx2 = Dx.dot(Dx)
Dz2 = Dz.dot(Dz)
# Extract the first and last rows of these matrices for convenience:
# they evaluate the one-sided spectral derivative exactly on each wall.
Dxl = Dx[0]
Dxr = Dx[-1]
Dzl = Dz.T[:,0]
Dzr = Dz.T[:,-1]
# Repeat the zs and xs into a two-dimensional grid
zz,xx = np.meshgrid(ptsz,ptsx)
rate = 44100 # sampling rate for sound
# set our stepsize to be OVERSAMP steps per audio sample
OVERSAMP = 1
dt = (1./rate)/(OVERSAMP + 0.)
# initial conditions for simulation
x0, z0 = 0.9, 0.1
# initial pressure is a gaussian centered at (x0, z0) of size sigma
sigma = 0.06
p = np.exp(-((xx - x0)**2 + (zz - z0)**2)/(2*sigma**2))
# previous time step was a smaller version of this gaussian
# (tau sets how fast the initial pulse was growing)
tau = 0.01
pold = p * np.exp(-dt/tau)
# initial velocity field is just a first order (finite-difference) estimate
v = (p-pold)/dt
# set the initial pressures and velocities on the walls to be zero
p[0,:] = p[-1,:] = 0
p[:,0] = p[:,-1] = 0
v[0,:] = v[-1,:] = 0
v[:,0] = v[:,-1] = 0
Since our function is defined on a Chebyshev grid, in order to view it in its proper form we need to do a spline fit to this unequally spaced rectangular grid.
# The field lives on an unevenly spaced Chebyshev grid, so resample it onto
# a regular grid with a spline before displaying.
f = RectBivariateSpline(ptsx,ptsz, p)
xi,zi = np.ogrid[-1:1:100j, 0:4:200j]
#setup nice figure for saving frames: frameless, with the axes filling it
fig = plt.figure(frameon=False)
fig.set_size_inches(6,3)
ax = plt.Axes(fig, [0.,0.,1.,1.])
ax.set_axis_off()
fig.add_axes(ax)
# fixed color limits so successive frames share a color scale
impic = ax.imshow( f(xi, zi), vmin=-0.1, vmax=0.1, aspect='auto', cmap=plt.cm.coolwarm)
# Create a directory to save our images to
try:
    os.mkdir("/tmp/pics")
except OSError:
    # best-effort: the directory may exist from a previous run
    print "Directory already exists"
Directory already exists
# our update equation calculator
def get_dp_dv(p,v):
    """ Return the time derivatives (dp/dt, dv/dt) of the pressure and
    velocity fields, combining the interior wave equation with the lossy
    reflecting-wall boundary condition on all four edges. """
    # Interior: pressure simply integrates the velocity field, dp/dt = v.
    dp = np.zeros_like(v)
    dp[1:-1,1:-1] = v[1:-1,1:-1]
    # Interior: wave equation, dv/dt = c^2 laplacian(p), evaluated with the
    # spectral second-derivative matrices along each axis.
    dv = c**2 * (Dx2.dot(p) + p.dot(Dz2.T))
    # Walls: enforce grad(f).n = -c*eta*df/dt on both fields by adding the
    # one-sided normal derivative on each edge (sign flipped on the far
    # walls, where the outward normal points the other way).  All terms are
    # additive, so the order of the four edges does not matter.
    for field, deriv in ((p, dp), (v, dv)):
        deriv[:,0]  += c*eta * field.dot(Dzl)
        deriv[:,-1] -= c*eta * field.dot(Dzr)
        deriv[0,:]  += c*eta * Dxl.dot(field)
        deriv[-1,:] -= c*eta * Dxr.dot(field)
    return dp,dv
STEPS = int(2/dt) # simulate for 2 seconds of audio
# set up lists to track conditions
pvals = [p.copy()]  # saved pressure snapshots (one appended every 100*OVERSAMP steps)
vvals = [v.copy()]
# grid indices of the 9 "microphone" locations (a 3x3 pattern over the court)
samplepoints = np.array([[13,-13],[13,32],[13,13],[32,-13],[32,32],[32,13],[-13,-13],[-13,32],[-13,13]])
sound = np.zeros((samplepoints.shape[0],STEPS))
start_time = time()
imcounter = count(0)  # running index for saved frame filenames
# Actual simulation
for i in xrange(STEPS):
    # do RK4 update of (p, v) through one timestep dt
    dp1,dv1 = get_dp_dv(p,v)
    dp2,dv2 = get_dp_dv(p+dt/2*dp1, v+dt/2*dv1)
    dp3,dv3 = get_dp_dv(p+dt/2*dp2, v+dt/2*dv2)
    dp4,dv4 = get_dp_dv(p+dt*dp3, v+dt*dv3)
    p = p+dt/6 *( dp1 + 2*dp2 + 2*dp3 + dp4 )
    v = v+dt/6 *( dv1 + 2*dv2 + 2*dv3 + dv4 )
    # save the sound as viewed at a particular coordinate on the grid
    # NOTE(review): writing sound[j,i] only fills the array densely when
    # OVERSAMP == 1; for OVERSAMP > 1 skipped steps stay zero -- confirm.
    if i%OVERSAMP==0:
        for j, loc in enumerate(samplepoints):
            sound[j,i] = p[loc[0],loc[1]]
    # Progress report every 1000 audio samples
    if i%(OVERSAMP*1000) == 0:
        clear_output()
        current_time = time()
        print "iter: {} progress: {:.3%} elapsed time: {:.0f}s est remaining: {:.0f}s".format(i,
            i/(STEPS+0.), current_time-start_time, (current_time-start_time)/(i+1) * (STEPS - i + 0.) )
        sys.stdout.flush()
    # save output figures to /tmp/pics only every 100 samples
    if i%(OVERSAMP*100) == 0:
        pvals.append(p.copy())
        # respline onto the regular display grid before saving the frame
        f = RectBivariateSpline(ptsx,ptsz, p )
        impic.set_data(f(xi,zi))
        impic.figure.savefig("/tmp/pics/wave{:06d}.png".format(imcounter.next()),dpi=50)
iter: 88000 progress: 99.773% elapsed time: 88s est remaining: 0s
# look at last step
# (the old code showed pvals[0], which is the *initial* condition saved
# before the loop ran; pvals[-1] is the final saved snapshot)
f = RectBivariateSpline(ptsx,ptsz, pvals[-1])
impic.set_data(f(xi,zi))
fig
# make an mp4 of simulation
#for mac
!ffmpeg -v warning -i /tmp/pics/wave%06d.png -tune stillimage -pix_fmt yuv420p -c:v libx264 -tune stillimage /tmp/bounce.mp4
#for ubuntu
#!avconv -v warning -i /tmp/pics/wave%06d.png -tune stillimage -pix_fmt yuv420p -c:v libx264 -tune stillimage /tmp/bounce.mp4
# embed the video in the ipython notebook
vidraw = base64.encodestring(open("/tmp/bounce.mp4","rb").read())
HTML("""<video controls alt="test" src="data:video/mp4;base64,{}">""".format(vidraw))
#make an animated gif of the first 100 frames
!convert /tmp/pics/wave0000??.png /tmp/bounce.gif
# look at sound(s): plot the pressure trace recorded at sample point 0
plt.plot((np.arange(STEPS) + 0.)/rate, sound[0], alpha=0.7);
plt.xlabel("time (s)");
plt.yticks([]);
plt.ylim((-0.1,0.1));
# Look at distribution of sample points
from matplotlib.patches import Ellipse
fig2,axs = plt.subplots(figsize=(8,4))
# label each microphone location with its index
# (court drawn with z along the horizontal axis, x flipped vertically)
for i,loc in enumerate(samplepoints):
    plt.text(ptsz[loc[1]],-ptsx[loc[0]],i)
# red dot marks where the initial gaussian pulse (the "ball hit") was placed
axs.add_artist(Ellipse((z0,-x0), width=0.2, height=0.2,color='r', alpha=0.5, clip_on=False))
plt.draw()
plt.ylim((-1,1));
plt.xlim((0,4));
plt.xticks([]);
plt.yticks([]);
# Listen to sampled audio at our different locations
for i in xrange(9):
    # report the physical (x, z) coordinates of each sample point
    print "pk={}, loc=({:.2f},{:.2f})".format(i, ptsx[samplepoints[i][0]], ptsz[samplepoints[i][1]] )
    display( Audio(sound[i], rate=rate) )
pk=0, loc=(-0.80,3.65)
pk=1, loc=(-0.80,2.05)
pk=2, loc=(-0.80,0.41)
pk=3, loc=(0.02,3.65)
pk=4, loc=(0.02,2.05)
pk=5, loc=(0.02,0.41)
pk=6, loc=(0.83,3.65)
pk=7, loc=(0.83,2.05)
pk=8, loc=(0.83,0.41)
#attempt to look at spectrogram of each microphone channel
fig, axs = plt.subplots(3,3, sharex=True, sharey=True)
#correct the layout so it corresponds to our other pictures
order = [2,1,0,5,4,3,8,7,6]
for i,j in enumerate(order):
    ax = axs.flat[i]
    ax.specgram(sound[j],noverlap=1500,NFFT=2048, Fs=44100, scale_by_freq=True, cmap=plt.cm.afmhot);
    # restrict to the low-frequency band and the simulated time span
    ax.set_ylim((0,1000));
    #ax.set_cmap(plt.cm.afmhot)
    #plt.colorbar();
    ax.set_xlim((0,1.9));
    # common color (dB) scale across all nine panels
    ax.images[0].set_clim((-130,-20))
plt.tight_layout();
# Save the sound as a wav in the tmp directory
from scipy.io.wavfile import write as wavwrite
# Scale the recorded traces so the loudest sample spans the 16-bit PCM
# range, then write every microphone channel into one wav file.
peak = np.abs(np.array(sound)).max()
outsound = ((np.array(sound) / peak) * 32766*1.0).astype('int16')
wavwrite("/tmp/sound.wav", 44100, outsound)