As much on a dare as anything else, I decided to add sound to a weather video I was working on. This builds on the earlier notebooks collected at https://github.com/bitsofbits/GIS, so if you haven't looked at those yet, please check them out first. This won't make much sense otherwise.
import numpy as np
First, grab and preprocess the rainfall data in the same way we did for animating rainfall.
from datetime import datetime
from animate_rainfall import load_station_data, normalized_hourly_rainfall
from datetime_range import datetime_range
# Load the per-station observations and resample onto the hourly grid that
# covers the 48-hour window of the video (Sept 7 12:00 to Sept 9 12:00, 2014;
# the extra minute on the end time makes the final hour inclusive).
station_data = load_station_data()
all_times = datetime_range(datetime(2014, 9, 7, 12, 0), datetime(2014, 9, 9, 12, 1))
# rainfall appears to map each timestamp to an array of per-station hourly
# totals (inferred from rainfall[t].sum() below — confirm in animate_rainfall).
rainfall = normalized_hourly_rainfall(station_data, all_times)
# Video frame rate; also used below to derive audio samples per video frame.
FPS = 60
Then we create the sound based on the total rainfall at any given point in time. We start with a wav
file of rain falling and repeat it until it's long enough to cover the whole video. Then we simply scale the amplitude of the sample points in the sound waveform by the total amount of rain falling (specifically, the sum of the rainfall at all stations in the last hour) at that time in the video. Since we have many more sound samples than rainfall samples, we first need to do some interpolation.
import wave
import struct
times = all_times

# "Rain" (http://soundbible.com/1999-Rain.html) by Pwlae is licensed under
# CC BY 3.0 (https://creativecommons.org/licenses/by/3.0/)
source = wave.open('Rain-Pwlae-390675481.wav')
params = source.getparams()
frame_ratio = source.getframerate() // FPS  # audio samples per video frame
nframes = source.getnframes()
nchannels = source.getnchannels()
rate = source.getframerate()
sample_width = source.getsampwidth()

video_frames = len(times)
video_duration = float(video_frames) / FPS
# A wave "frame" is one sample *per channel*, so the clip duration is simply
# nframes / rate; dividing by nchannels as well understated the length by 2x
# for stereo, which made us repeat the clip more often than needed.
wav_duration = float(nframes) / rate
# int(): np.ceil returns a float, and bytes can only be repeated by an int.
repeats = int(np.ceil(video_duration / wav_duration))
desired_timepoints = video_frames * frame_ratio

# Tile the source audio and trim to exactly the number of bytes the video
# needs (sample_width bytes per channel per timepoint).
raw_audio = source.readframes(nframes)
audio_data = (raw_audio * repeats)[:sample_width * nchannels * desired_timepoints]
source.close()
# Sentinel points at +/-1e9 seconds handle the case where the video timeline
# extends past the first or last rainfall sample.
rainfall_times = sorted(rainfall.keys())
rainfall_frame_locations = ([-1e9] +
                            [(x - times[0]).total_seconds() for x in rainfall_times] +
                            [1e9])
rainfall_totals = np.array([rainfall[x].sum() for x in rainfall_times])
normalized_totals = rainfall_totals / rainfall_totals.max()
# BUG FIX: the sentinels previously used the *unnormalized* first and last
# totals while the interior points were normalized, so extrapolated regions
# got a wildly different (usually saturated) volume.  Pad with the
# normalized endpoint values instead so the scale is consistent throughout.
rainfall_frame_totals = ([normalized_totals[0]] +
                         list(normalized_totals) +
                         [normalized_totals[-1]])
desired_times = [(x - times[0]).total_seconds() for x in times]
# Interpolate the hourly rainfall totals up to one value per audio sample.
scale = np.interp(np.linspace(0, desired_times[-1], desired_timepoints),
                  rainfall_frame_locations, rainfall_frame_totals)
# Compress the scale so volume saturates at alpha * max rainfall, rather
# than reaching full volume only at the single peak moment.
alpha = 0.5
scale = np.minimum(scale / alpha, 1.0)
# Scale every 16-bit PCM sample by the rainfall intensity at that instant.
# The "<h" format is 16-bit little-endian; fail loudly rather than silently
# corrupting the output if the source file uses a different sample width.
if sample_width != 2:
    raise ValueError("expected 16-bit PCM, got sample width %d" % sample_width)
# BUG FIX: the original accumulated into a str with += which breaks on
# Python 3 (cannot concatenate str and bytes) and is quadratic; a bytearray
# grows in amortized O(1).  Also avoid shadowing the builtins `bytes`/`os`.
scaled = bytearray()
for i in range(desired_timepoints):
    sample_scale = scale[i]
    for j in range(nchannels):
        offset = sample_width * (i * nchannels + j)
        # unpack_from reads in place, avoiding a per-sample slice copy
        [value] = struct.unpack_from("<h", audio_data, offset)
        scaled += struct.pack("<h", int(round(value * sample_scale)))
scaled_audio = bytes(scaled)
# Write the scaled samples out with the same parameters as the source clip.
sink = wave.open('processed_rain_sound.wav', 'wb')
sink.setparams(params)
# BUG FIX: setnframes takes wave *frames* (one sample per channel each),
# not individual samples, so multiplying by nchannels overstated the count.
# writeframes patches the header with the true count anyway, but keep the
# declared value correct.
sink.setnframes(desired_timepoints)
sink.writeframes(scaled_audio)
sink.close()
from subprocess import call

# Mux the generated soundtrack onto the (silent) video; -y overwrites any
# existing output file.  The original discarded the exit status (and left a
# stray notebook `0` to suppress it) — fail loudly if ffmpeg errors out.
status = call(["ffmpeg", "-i", "September_8th_storm_rainfall_radar.m4v",
               "-i", "processed_rain_sound.wav",
               "-y", "September_8th_storm_rainfall_radar_sound.m4v"])
if status != 0:
    raise RuntimeError("ffmpeg failed with exit code %d" % status)
# Embed the finished video inline in the notebook.  IPythonVideo is a local
# helper from the earlier notebooks; "x-m4v" is the MIME subtype for the tag.
from IPythonVideo import video
video("September_8th_storm_rainfall_radar_sound.m4v", "x-m4v")