
Commit a2915f7

Make e2e benchmarks more stable (#6927)
1 parent 99cb17a commit a2915f7

File tree

5 files changed: +120 −127 lines


.github/workflows/osrm-backend.yml

Lines changed: 1 addition & 1 deletion
@@ -656,7 +656,7 @@ jobs:
           path: pr
       - name: Install dependencies
         run: |
-          python3 -m pip install "conan<2.0.0" "requests==2.31.0" "locust==2.28.0"
+          python3 -m pip install "conan<2.0.0" "requests==2.31.0" "numpy==1.26.4"
           sudo apt-get update -y && sudo apt-get install ccache
       - name: Prepare data
         run: |
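
The dependency change is the whole story of this file: the pinned locust package is dropped in favor of a pinned numpy, because load generation moves into the plain-Python e2e_benchmark.py added below and numpy is only needed to summarize the measured latencies. A minimal sketch of that aggregation, with made-up sample values:

    import numpy as np

    # Hypothetical latency samples in milliseconds.
    times = [11.8, 12.1, 13.0, 15.4, 42.7]
    print(f"Median time: {np.median(times)}ms")
    print(f"95th percentile: {np.percentile(times, 95)}ms")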

scripts/ci/e2e_benchmark.py

Lines changed: 102 additions & 0 deletions
@@ -0,0 +1,102 @@
+import requests
+import sys
+import random
+from collections import defaultdict
+import os
+import csv
+import numpy as np
+import time
+import argparse
+
+class BenchmarkRunner:
+    def __init__(self):
+        self.coordinates = []
+        self.tracks = defaultdict(list)
+
+        gps_traces_file_path = os.path.expanduser('~/gps_traces.csv')
+        with open(gps_traces_file_path, 'r') as file:
+            reader = csv.DictReader(file)
+            for row in reader:
+                coord = (float(row['Latitude']), float(row['Longitude']))
+                self.coordinates.append(coord)
+                self.tracks[row['TrackID']].append(coord)
+        self.track_ids = list(self.tracks.keys())
+
+    def run(self, benchmark_name, host, num_requests, warmup_requests=50):
+        for _ in range(warmup_requests):
+            url = self.make_url(host, benchmark_name)
+            _ = requests.get(url)
+
+        times = []
+
+        for _ in range(num_requests):
+            url = self.make_url(host, benchmark_name)
+
+            start_time = time.time()
+            response = requests.get(url)
+            end_time = time.time()
+            if response.status_code != 200:
+                if benchmark_name == 'match':
+                    code = response.json()['code']
+                    if code == 'NoSegment' or code == 'NoMatch':
+                        continue
+                raise Exception(f"Error: {response.status_code} {response.text}")
+            times.append((end_time - start_time) * 1000)  # convert to ms
+
+        return times
+
+    def make_url(self, host, benchmark_name):
+        if benchmark_name == 'route':
+            start = random.choice(self.coordinates)
+            end = random.choice(self.coordinates)
+
+            start_coord = f"{start[1]:.6f},{start[0]:.6f}"
+            end_coord = f"{end[1]:.6f},{end[0]:.6f}"
+            return f"{host}/route/v1/driving/{start_coord};{end_coord}?overview=full&steps=true"
+        elif benchmark_name == 'table':
+            num_coords = random.randint(3, 100)
+            selected_coords = random.sample(self.coordinates, num_coords)
+            coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in selected_coords])
+            return f"{host}/table/v1/driving/{coords_str}"
+        elif benchmark_name == 'match':
+            num_coords = random.randint(50, 100)
+            track_id = random.choice(self.track_ids)
+            track_coords = self.tracks[track_id][:num_coords]
+            coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in track_coords])
+            radius_str = ";".join([f"{random.randint(5, 20)}" for _ in range(len(track_coords))])
+            return f"{host}/match/v1/driving/{coords_str}?steps=true&radiuses={radius_str}"
+        elif benchmark_name == 'nearest':
+            coord = random.choice(self.coordinates)
+            coord_str = f"{coord[1]:.6f},{coord[0]:.6f}"
+            return f"{host}/nearest/v1/driving/{coord_str}"
+        elif benchmark_name == 'trip':
+            num_coords = random.randint(2, 10)
+            selected_coords = random.sample(self.coordinates, num_coords)
+            coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in selected_coords])
+            return f"{host}/trip/v1/driving/{coords_str}?steps=true"
+        else:
+            raise Exception(f"Unknown benchmark: {benchmark_name}")
+
+def main():
+    parser = argparse.ArgumentParser(description='Run GPS benchmark tests.')
+    parser.add_argument('--host', type=str, required=True, help='Host URL')
+    parser.add_argument('--method', type=str, required=True, choices=['route', 'table', 'match', 'nearest', 'trip'], help='Benchmark method')
+    parser.add_argument('--num_requests', type=int, required=True, help='Number of requests to perform')
+
+    args = parser.parse_args()
+
+    random.seed(42)
+
+    runner = BenchmarkRunner()
+    times = runner.run(args.method, args.host, args.num_requests)
+
+    print(f'Total: {np.sum(times)}ms')
+    print(f"Min time: {np.min(times)}ms")
+    print(f"Mean time: {np.mean(times)}ms")
+    print(f"Median time: {np.median(times)}ms")
+    print(f"95th percentile: {np.percentile(times, 95)}ms")
+    print(f"99th percentile: {np.percentile(times, 99)}ms")
+    print(f"Max time: {np.max(times)}ms")
+
+if __name__ == '__main__':
+    main()
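
BenchmarkRunner.__init__ expects ~/gps_traces.csv to provide at least the Latitude, Longitude, and TrackID columns it reads. A minimal sketch of a file with that shape (rows are invented for illustration):

    import csv
    import io
    import textwrap

    # Invented rows mimicking the schema e2e_benchmark.py reads from
    # ~/gps_traces.csv; the real file is supplied by the CI data step.
    sample = textwrap.dedent("""\
        TrackID,Latitude,Longitude
        1,52.517037,13.388860
        1,52.517134,13.388950
        2,52.496891,13.385983
    """)
    for row in csv.DictReader(io.StringIO(sample)):
        print(row['TrackID'], float(row['Latitude']), float(row['Longitude']))

Stability comes from the fixed random.seed(42) in main() plus the 50 warm-up requests: repeated CI runs replay the same request mix against a warmed server. run_benchmarks.sh below invokes the script once per method with --num_requests 1000.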

scripts/ci/locustfile.py

Lines changed: 0 additions & 74 deletions
This file was deleted; the locust-based load test it defined is superseded by scripts/ci/e2e_benchmark.py above.

scripts/ci/process_locust_benchmark_results.py

Lines changed: 0 additions & 31 deletions
This file was deleted; with locust gone, its post-processing of locust CSV results is no longer needed.

scripts/ci/run_benchmarks.sh

Lines changed: 17 additions & 21 deletions
@@ -18,6 +18,7 @@ function run_benchmarks_for_folder {
 
     FOLDER=$1
     RESULTS_FOLDER=$2
+    SCRIPTS_FOLDER=$3
 
     mkdir -p $RESULTS_FOLDER
 
@@ -41,32 +42,27 @@ function run_benchmarks_for_folder {
     measure_peak_ram_and_time "$BINARIES_FOLDER/osrm-customize $FOLDER/data.osrm" "$RESULTS_FOLDER/osrm_customize.bench"
     measure_peak_ram_and_time "$BINARIES_FOLDER/osrm-contract $FOLDER/data.osrm" "$RESULTS_FOLDER/osrm_contract.bench"
 
-    if [ -f "$FOLDER/scripts/ci/locustfile.py" ]; then
-        for ALGORITHM in mld ch; do
-            $BINARIES_FOLDER/osrm-routed --algorithm $ALGORITHM $FOLDER/data.osrm &
-            OSRM_ROUTED_PID=$!
+    for ALGORITHM in ch mld; do
+        $BINARIES_FOLDER/osrm-routed --algorithm $ALGORITHM $FOLDER/data.osrm &
+        OSRM_ROUTED_PID=$!
 
-            # wait for osrm-routed to start
-            curl --retry-delay 3 --retry 10 --retry-all-errors "http://127.0.0.1:5000/route/v1/driving/13.388860,52.517037;13.385983,52.496891?steps=true"
-            locust -f $FOLDER/scripts/ci/locustfile.py \
-                --headless \
-                --processes -1 \
-                --users 10 \
-                --spawn-rate 1 \
-                --host http://localhost:5000 \
-                --run-time 1m \
-                --csv=locust_results_$ALGORITHM \
-                --loglevel ERROR
+        # wait for osrm-routed to start
+        if ! curl --retry-delay 3 --retry 10 --retry-all-errors "http://127.0.0.1:5000/route/v1/driving/13.388860,52.517037;13.385983,52.496891?steps=true"; then
+            echo "osrm-routed failed to start for algorithm $ALGORITHM"
+            kill -9 $OSRM_ROUTED_PID
+            continue
+        fi
 
-            python3 $FOLDER/scripts/ci/process_locust_benchmark_results.py locust_results_$ALGORITHM $ALGORITHM $RESULTS_FOLDER
+        for METHOD in route nearest trip table match; do
+            python3 $SCRIPTS_FOLDER/scripts/ci/e2e_benchmark.py --host http://localhost:5000 --method $METHOD --num_requests 1000 > $RESULTS_FOLDER/e2e_${METHOD}_${ALGORITHM}.bench
+        done
 
-        kill -0 $OSRM_ROUTED_PID
-    done
-    fi
+        kill -9 $OSRM_ROUTED_PID
+    done
 
 }
 
-run_benchmarks_for_folder $1 "${1}_results"
-run_benchmarks_for_folder $2 "${2}_results"
+run_benchmarks_for_folder $1 "${1}_results" $2
+run_benchmarks_for_folder $2 "${2}_results" $2
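
Beyond swapping locust for the Python script, two details make the harness more stable: the curl readiness probe now gates the benchmark loop (on failure the server is killed and the algorithm skipped, rather than benchmarking a dead endpoint), and the old kill -0, which only tests whether the process can receive a signal and never terminates it, becomes kill -9, so osrm-routed no longer survives into the next iteration. The probe's logic, sketched in Python for illustration (URL and retry policy copied from the script):

    import time
    import requests

    def wait_for_osrm(url, retries=10, delay=3):
        # Poll the route endpoint until osrm-routed answers,
        # mirroring curl --retry 10 --retry-delay 3.
        for _ in range(retries):
            try:
                if requests.get(url).status_code == 200:
                    return True
            except requests.ConnectionError:
                pass
            time.sleep(delay)
        return False

    wait_for_osrm("http://127.0.0.1:5000/route/v1/driving/"
                  "13.388860,52.517037;13.385983,52.496891?steps=true")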
