results_to_text.py (3808B)
#!/usr/bin/env python3
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import math

import tabulate

# We want leading whitespace for difference row cells (see below)
tabulate.PRESERVE_WHITESPACE = True


def format_value(x, stdev):
    stdev_pr = stdev / x * 100
    if stdev_pr < 1.5:
        # don't care too much
        return f'{x:.2g}'
    else:
        return f'{x:.2g} ± {math.ceil(stdev_pr)}%'


def result_to_text(result):
    """Return text representation of bench_one() returned dict."""
    if 'average' in result:
        s = format_value(result['average'], result['stdev'])
        if 'n-failed' in result:
            s += '\n({} failed)'.format(result['n-failed'])
        return s
    else:
        return 'FAILED'


def results_dimension(results):
    dim = None
    for case in results['cases']:
        for env in results['envs']:
            res = results['tab'][case['id']][env['id']]
            if dim is None:
                dim = res['dimension']
            else:
                assert dim == res['dimension']

    assert dim in ('iops', 'seconds')

    return dim


def results_to_text(results):
    """Return text representation of bench() returned dict."""
    n_columns = len(results['envs'])
    named_columns = n_columns > 2
    dim = results_dimension(results)
    tab = []

    if named_columns:
        # Environment columns are named A, B, ...
        tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])

    tab.append([''] + [c['id'] for c in results['envs']])

    for case in results['cases']:
        row = [case['id']]
        case_results = results['tab'][case['id']]
        for env in results['envs']:
            res = case_results[env['id']]
            row.append(result_to_text(res))
        tab.append(row)

        # Add row of difference between columns. For each column starting from
        # B we calculate difference with all previous columns.
        row = ['', '']  # case name and first column
        for i in range(1, n_columns):
            cell = ''
            env = results['envs'][i]
            res = case_results[env['id']]

            if 'average' not in res:
                # Failed result
                row.append(cell)
                continue

            for j in range(0, i):
                env_j = results['envs'][j]
                res_j = case_results[env_j['id']]
                cell += ' '

                if 'average' not in res_j:
                    # Failed result
                    cell += '--'
                    continue

                col_j = tab[0][j + 1] if named_columns else ''
                diff_pr = round((res['average'] - res_j['average']) /
                                res_j['average'] * 100)
                cell += f' {col_j}{diff_pr:+}%'
            row.append(cell)
        tab.append(row)

    return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)


if __name__ == '__main__':
    import sys
    import json

    if len(sys.argv) < 2:
        print(f'USAGE: {sys.argv[0]} results.json')
        exit(1)

    with open(sys.argv[1]) as f:
        print(results_to_text(json.load(f)))
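For reference, this is roughly the shape of input the script consumes. It is a minimal sketch inferred only from how the code above reads the dict; the ids and numbers are illustrative, and real results.json files come from simplebench's bench(), which is not shown here:

    # Hypothetical input, shaped as results_to_text() appears to expect:
    # 'envs' and 'cases' carry ids, 'tab' maps case id -> env id -> result,
    # and each result has a 'dimension' plus 'average'/'stdev' on success.
    results = {
        'envs': [{'id': 'master'}, {'id': 'new'}],
        'cases': [{'id': '4k-write'}],
        'tab': {
            '4k-write': {
                'master': {'dimension': 'iops', 'average': 12000, 'stdev': 100},
                'new': {'dimension': 'iops', 'average': 13000, 'stdev': 150},
            },
        },
    }
    print(results_to_text(results))

With only two environments the columns keep their env ids (no A/B header row), and the difference row printed under the case shows the second column relative to the first, here about +8%.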