|
| 1 | +#!/usr/bin/env python3.6 |
| 2 | + |
| 3 | +import csv |
| 4 | +import os.path |
| 5 | +import sys |
| 6 | + |
class CsvReport:
    """In-memory CSV report split into header, body and footer.

    Each part is a dict mapping a row name to the list of column values
    recorded for that row (one column per aggregated run).
    """

    def __init__(self, header, body, footer):
        self.header = header
        self.body = body
        self.footer = footer

    def merge(self, csv_report):
        """Fold another CsvReport into this one, matching rows by key.

        Rows present in both reports gain the incoming value as a new
        column; rows missing from the incoming report get an empty cell.
        Rows that only exist in the incoming report are left-padded with
        empty cells for every earlier run and then adopted.  The incoming
        report's body is consumed (mutated) in the process.

        Returns self.
        """
        run_count = 0

        for key, runs in self.body.items():
            run_count = len(runs)  # number of columns recorded before this merge
            incoming = csv_report.body.get(key, [''])
            runs.append(incoming[0])
            if incoming != ['']:
                del csv_report.body[key]

        self.header['Name'].append(csv_report.header['Name'][0])

        # Anything still left in the incoming body is a brand-new row:
        # pad it so its first value lines up with the newest column.
        if csv_report.body:
            csv_report.body = {
                key: prepend_runs(times, run_count)
                for key, times in csv_report.body.items()
            }
            self.body.update(csv_report.body)

        self.footer['Aggregated'].append(csv_report.footer['Aggregated'][0])

        return self

    def to_list(self):
        """Flatten header, body and footer into rows writable by csv.writer."""
        rows = []
        for section in (self.header, self.body, self.footer):
            for key, values in section.items():
                rows.append([key] + values)
        return rows
| 47 | + |
| 48 | + |
def usage():
    """Print command-line usage information for this script."""
    message = """
This script aggregates the average response times for each request defined in Locust config files into
one file /home/andrei.filipovici/performance_results/locust_aggregated_report.csv

Usage: ./ext/bin/locust-load-tests-aggregator /absolute/path/to/locust_stats.csv"""
    print(message)
| 55 | + |
# --- command-line handling ---

# Everything after the script name; empty when invoked with no arguments.
sys_args = sys.argv[1:]
if sys_args == [] or sys_args[0] == '--help':
    usage()
    exit(0)

# Absolute path to the freshly generated Locust stats csv (first CLI argument).
new_report_path = sys_args[0]
# Second-to-last path component, kept as a one-element LIST so it can be used
# directly as the 'Name' header column value (presumably a "<sha>/<timestamp>"
# directory name -- TODO confirm against the caller's directory layout).
sha_timestamp = new_report_path.split('/')[-2:-1]
# Destination file that accumulates results across runs.
aggregated_report_path = '/home/andrei.filipovici/performance_results/locust_aggregated_report.csv'
| 64 | + |
| 65 | + |
def iterator_to_dict(iterator, key_index, filter=None):
    """Build a dictionary from the rows of an iterable.

    iterator  -- iterable of list rows; each row is MUTATED (the key cell
                 is popped out of it)
    key_index -- int -- index of the cell used as the dictionary key
    filter    -- optional list of indices to keep as the value; indices are
                 relative to the row AFTER the key cell has been popped.
                 By default the whole remaining row is kept.
                 (Name kept for caller compatibility although it shadows
                 the builtin; the old mutable default `[]` was replaced by
                 the None sentinel -- both are falsy, so behaviour is
                 unchanged, without the shared-mutable-default pitfall.)

    Returns dict mapping key cell -> list of remaining (or selected) cells.
    """
    result = {}
    for row in iterator:
        name = row.pop(key_index)  # mutates the caller's row
        if filter:
            row = [row[i] for i in filter]
        result[name] = row
    return result
| 78 | + |
| 79 | + |
def prepend_runs(runs, prepend_count, prepend_value=''):
    """Return *runs* with *prepend_count* copies of *prepend_value* in front.

    Used to left-pad a new CSV row so its columns line up with runs that
    were recorded before the row first appeared.

    runs          -- list -- the existing column values
    prepend_count -- int -- how many padding cells to add
    prepend_value -- string -- the value used for each padding cell
    """
    return [prepend_value] * prepend_count + list(runs)
| 89 | + |
| 90 | + |
def write_aggregated_report(data):
    """Overwrite the aggregated report csv with *data*.

    data -- list -- rows to be written (each row a list of cell values)

    Writes to the module-level `aggregated_report_path`.
    """
    # newline='' is required by the csv module so the writer controls line
    # endings itself (otherwise rows come out doubled-spaced on Windows).
    # 'w' replaces the previous 'w+': the file is only ever written here,
    # never read back through this handle.
    with open(aggregated_report_path, 'w', newline='') as report_file:
        report_writer = csv.writer(report_file, delimiter=',')
        report_writer.writerows(data)
| 98 | + |
| 99 | + |
# --- main ---

new_aggregated_report = {}  # NOTE(review): never read afterwards -- looks like dead code
with open(new_report_path) as stats_file:
    reader = csv.reader(stats_file, delimiter=',')
    # First row is the csv header (discarded); last row is the totals line;
    # everything in between is one row per request.
    _h, *body, footer = reader
    # Key rows by the 'Name' column (index 1), keeping only index 4 of the
    # remainder after the key cell is popped (presumably the average
    # response time column -- TODO confirm against the Locust stats format).
    body = iterator_to_dict(body, 1, [4])
    footer = iterator_to_dict([footer], 1, [4])
    new_report = CsvReport({'Name': sha_timestamp}, body, footer)

    # Merge into the existing aggregated report when one is present and
    # non-empty; otherwise this run's report becomes the aggregate.
    if os.path.isfile(aggregated_report_path) and os.path.getsize(aggregated_report_path):
        with open(aggregated_report_path) as aggregated_report_file:
            agg_reader = csv.reader(aggregated_report_file, delimiter=',')
            agg_header, *agg_body, agg_footer = agg_reader
            # The aggregated file already stores row names in column 0.
            agg_header = iterator_to_dict([agg_header], 0)
            agg_body = iterator_to_dict(agg_body, 0)
            agg_footer = iterator_to_dict([agg_footer], 0)

        aggregated_report = CsvReport(agg_header, agg_body, agg_footer)
        aggregated_report.merge(new_report)
    else:
        aggregated_report = new_report

write_aggregated_report(aggregated_report.to_list())
0 commit comments