import os
import concurrent.futures

import numpy as np

from utils import utils, video

# Load the raw tracking tables for both camera groups, plus the shared metadata.
a_data = video.load_table('A')
b_data = video.load_table('B')
metadata = video.load_metadata()

# Build the frame index for each group from the block boundaries in the
# metadata, sampled at 0.5-frame steps.
a_index, b_index = [], []
for b in ['block 1', 'block 2', 'block 3']:
    a_index.extend(utils.f_range(metadata[b]['start'], metadata[b]['end'], 0.5))
for b in ['block 5', 'block 6', 'block 7']:
    b_index.extend(utils.f_range(metadata[b]['start'], metadata[b]['end'], 0.5))

groups = {'A': a_data.loc[a_index], 'B': b_data.loc[b_index]}

# Report each group's coverage (share of non-NaN values), then forward-fill
# the gaps.
for name, group_data in sorted(groups.items()):
    exists = group_data.notnull().to_numpy().flatten()
    exist_percentage = exists.sum() / len(exists)
    print('Group {}, percentage of not-NaN values: {}'.format(name, 100 * exist_percentage))
    group_data.ffill(inplace=True)

# Transform both groups from camera coordinates to absolute coordinates using
# the static calibration points. The two transforms are independent, so they
# run in parallel processes.
known_ys = metadata['static points']['absolute']
a_known_xs = metadata['static points']['group a']
b_known_xs = metadata['static points']['group b']
with concurrent.futures.ProcessPoolExecutor() as executor:
    future_a = executor.submit(video.transform, groups['A'], a_known_xs, known_ys)
    future_b = executor.submit(video.transform, groups['B'], b_known_xs, known_ys)
    groups['A'] = future_a.result()
    groups['B'] = future_b.result()

# Clip the transformed positions to the physical boundaries of the scene and
# report how many values fell outside them.
lower = -4
upper = 15
rows_list = []
for name in sorted(groups):
    values = groups[name].values
    total = len(values.flatten())
    below_bounds = len(values[values < lower])
    above_bounds = len(values[values > upper])
    in_bounds = total - below_bounds - above_bounds
    rows_list.append([name,
                      100 * below_bounds / total,
                      100 * above_bounds / total,
                      100 * in_bounds / total])
    groups[name] = groups[name].clip(lower=lower, upper=upper)
    print('Group: {}'.format(name))
    print('Percentage of values below boundary: {}'.format(100 * below_bounds / total))
    print('Percentage of values above boundary: {}'.format(100 * above_bounds / total))
    print('Total in boundaries: {}'.format(100 * in_bounds / total))
    print()

# The position of the bench according to the two videos (one per group).
benches = []
for name, known_xs in [('a', a_known_xs), ('b', b_known_xs)]:
    A, b = video.solve_affine_transformation(known_xs, known_ys)
    benches.append(video.transform_vec(metadata['bench']['group {}'.format(name)], A, b))

# Distance between the two estimated positions (a measure of the error).
distance = utils.euclidean_distance(*benches)
print('Distance of the error between the position values:', distance, '(meters)')

# The bench position is taken as the mean of the two estimates.
bench_pos = np.array(benches).mean(axis=0)

# Persist the processed data for the next stage of the pipeline.
for name, group in groups.items():
    group.index.name = 'frame'
    group.to_csv(os.path.join('cooked', 'video_group_{}.csv'.format(name.lower())))
np.savetxt(os.path.join('cooked', 'video_boundaries.csv'), (lower, upper))
np.savetxt(os.path.join('cooked', 'video_bench_pos.csv'), bench_pos)
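
# --- Reference sketch (assumption, not project code) -------------------------
# video.solve_affine_transformation is assumed to fit an affine map
# y = A @ x + b from the known point correspondences by least squares; its
# actual implementation lives in the project's video module. The stand-alone
# version below, with a hypothetical name, shows the idea.
def _solve_affine_sketch(xs, ys):
    """Fit A (2x2) and b (2,) such that ys ~= xs @ A.T + b, by least squares."""
    xs = np.asarray(xs, dtype=float)
    ys = np.asarray(ys, dtype=float)
    # Augment xs with a column of ones so the translation b is absorbed
    # into a single linear system.
    design = np.hstack([xs, np.ones((len(xs), 1))])
    coef, *_ = np.linalg.lstsq(design, ys, rcond=None)  # shape (3, 2)
    A = coef[:2].T  # (2, 2) linear part
    b = coef[2]     # (2,)  translation
    return A, b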