
Commit 682d4d0

Merge pull request #3561 from sebastian-miclea/PDB-5204
(PDB-5204) Created script to aggregate Locust results
2 parents 1c49818 + fe29a65 commit 682d4d0

File tree: 2 files changed (+131, -2)

ext/bin/locust-load-tests-aggregator
ext/bin/run-locust-load-tests

ext/bin/locust-load-tests-aggregator

Lines changed: 121 additions & 0 deletions
@@ -0,0 +1,121 @@
#!/usr/bin/env python3.6

import csv
import os.path
import sys


class CsvReport:
    def __init__(self, header, body, footer):
        self.header = header
        self.body = body
        self.footer = footer

    def merge(self, csv_report):
        # Appends a new csv_report object to the current instance;
        # grouping is done by the key for header, body and footer.
        # csv_report - CsvReport - report for a single run to merge into this one
        existing_run_count = 0

        for name, value in self.body.items():
            existing_run_count = len(value)
            new_value = csv_report.body.get(name, [''])
            self.body[name].append(new_value[0])

            if new_value != ['']:
                del csv_report.body[name]

        self.header['Name'].append(csv_report.header['Name'][0])

        if len(csv_report.body) > 0:
            # Requests that only appear in the new run get empty columns for all previous runs
            csv_report.body = {name: prepend_runs(run_time, existing_run_count) for name, run_time in csv_report.body.items()}
            self.body.update(csv_report.body)

        self.footer['Aggregated'].append(csv_report.footer['Aggregated'][0])

        return self

    def to_list(self):
        # Converts header, body and footer to a list of lists representing rows in a csv file
        data = []
        csv_parts = (self.header.items(), self.body.items(), self.footer.items())

        for part in csv_parts:
            for name, values in part:
                data.append([name] + values)

        return data


def usage():
    print("""
This script aggregates the average response times for each request defined in Locust config files into
one file /home/andrei.filipovici/performance_results/locust_aggregated_report.csv

Usage: ./ext/bin/locust-load-tests-aggregator /absolute/path/to/locust_stats.csv""")


sys_args = sys.argv[1:]
if sys_args == [] or sys_args[0] == '--help':
    usage()
    exit(0)

new_report_path = sys_args[0]
sha_timestamp = new_report_path.split('/')[-2:-1]
aggregated_report_path = '/home/andrei.filipovici/performance_results/locust_aggregated_report.csv'


def iterator_to_dict(iterator, key_index, filter=[]):
    # Transforms an iterable object into a dictionary
    # iterator - iterable object
    # key_index - int - index of the column that will be set as key for the returned dictionary
    # filter - list - list of indices that will be selected for the value of each key
    result = {}
    for row in iterator:
        name = row.pop(key_index)
        if filter:
            row = list(row[i] for i in filter)
        result[name] = row
    return result


def prepend_runs(runs, prepend_count, prepend_value=''):
    # Adds empty columns at the beginning of each line in the csv
    # runs - list - list of new columns to be added
    # prepend_count - int - the number of columns to be added
    # prepend_value - string - what value should be prepended to the beginning of the row
    items = [prepend_value] * prepend_count
    items.extend(runs)

    return items


def write_aggregated_report(data):
    # Write the new aggregated report to the csv file
    # data - list - rows to be written
    with open(aggregated_report_path, 'w+') as report_file:
        report_writer = csv.writer(report_file, delimiter=',')
        for row in data:
            report_writer.writerow(row)


with open(new_report_path) as stats_file:
    reader = csv.reader(stats_file, delimiter=',')
    _h, *body, footer = reader
    body = iterator_to_dict(body, 1, [4])
    footer = iterator_to_dict([footer], 1, [4])
    new_report = CsvReport({'Name': sha_timestamp}, body, footer)

if os.path.isfile(aggregated_report_path) and os.path.getsize(aggregated_report_path):
    with open(aggregated_report_path) as aggregated_report_file:
        agg_reader = csv.reader(aggregated_report_file, delimiter=',')
        agg_header, *agg_body, agg_footer = agg_reader
        agg_header = iterator_to_dict([agg_header], 0)
        agg_body = iterator_to_dict(agg_body, 0)
        agg_footer = iterator_to_dict([agg_footer], 0)

    aggregated_report = CsvReport(agg_header, agg_body, agg_footer)
    aggregated_report.merge(new_report)
else:
    aggregated_report = new_report

write_aggregated_report(aggregated_report.to_list())
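
A minimal sketch (not part of the commit) of how CsvReport.merge and to_list behave, assuming CsvReport and prepend_runs from the listing above are in scope; the request names, run labels and timings below are made up for illustration:

existing = CsvReport(
    {'Name': ['sha1_1600000000']},       # one run label per column so far
    {'/pdb/query/v4': ['120']},          # request name -> avg response time per run
    {'Aggregated': ['118']})
new_run = CsvReport(
    {'Name': ['sha2_1600003600']},
    {'/pdb/query/v4': ['135'], '/pdb/cmd/v1': ['90']},
    {'Aggregated': ['130']})

existing.merge(new_run)
# existing.to_list() now returns:
#   ['Name', 'sha1_1600000000', 'sha2_1600003600']
#   ['/pdb/query/v4', '120', '135']
#   ['/pdb/cmd/v1', '', '90']      # request new in the second run, padded for the first
#   ['Aggregated', '118', '130']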

ext/bin/run-locust-load-tests

Lines changed: 10 additions & 2 deletions
@@ -72,11 +72,11 @@ while ! grep -Fq "Finished database garbage collection" "$output_dir/pdb_log.txt
 done
 echo

-echo "Starting locust warm-up\n"
+echo "Starting locust warm-up"
 # We use the same filename to have them overwritten by the test run.
 python3.6 ./locust/run-load-test -t 5m -u 20 --csv "$output_dir/locust" --html "$output_dir/locust.html" --only-summary -T all

-echo "Starting locust load tests\n"
+echo "Starting locust load tests"
 python3.6 ./locust/run-load-test -t 15m -u 20 --csv "$output_dir/locust" --html "$output_dir/locust.html" --only-summary -T all
 test_exit=$?

@@ -85,3 +85,11 @@ if [ $test_exit -ne 0 ]; then
 fi

 printf "Tests ran successfully. Output files are: %q_*.csv\n" "$output_dir/locust"
+
+printf "Starting aggregator script\n"
+python3.6 ./ext/bin/locust-load-tests-aggregator "$output_dir/locust_stats.csv"
+aggregator_exit=$?
+
+if [ $aggregator_exit -eq 0 ]; then
+printf "Aggregated successfully. Output file: %q\n" "/home/andrei.filipovici/performance_results/locust_aggregated_report.csv"
+fi
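
Once a few runs have been aggregated, the report can also be inspected programmatically. A hypothetical follow-up (not part of this commit) that compares the overall averages of the last two runs; the path is the one hard-coded in the aggregator, and the row layout ([name, value_per_run, ...]) follows CsvReport.to_list():

import csv

with open('/home/andrei.filipovici/performance_results/locust_aggregated_report.csv') as f:
    rows = {row[0]: row[1:] for row in csv.reader(f)}

runs = rows['Name']            # sha_timestamp label for each aggregated run
overall = rows['Aggregated']   # overall average response time for each run
if len(overall) >= 2:
    print("{} -> {}: average response time {} -> {}".format(
        runs[-2], runs[-1], overall[-2], overall[-1]))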
