1
+ import json
2
+
1
3
import matplotlib .pyplot as plt
2
4
import numpy as np
3
- import json
4
5
import pandas as pd
5
6
6
# Path of the pytest-benchmark JSON results file read by this script.
BENCHMARKS_JSON = "results.json"
7
8
8
9
# Hardware details shown in title
HARDWARE = (
    "AMD Ryzen 9 9900X 12-Core Processor 63032 MB (fp64 fp16)\n"
    " oneAPI 2025.1.3 Intel(R) OpenCL Graphics: Intel(R) Arc(TM) B580 Graphics, 11873 MB (fp64 fp16)"
)
12
13
# Toggle showing numeric values on the graphs.
SHOW_NUMBERS = True
13
14
14
15
# Round displayed numbers to this many digits after the decimal point.
ROUND_NUMBERS = 1
16
17
17
18
# package list in graph order; arrayfire packages are added later
PKG_NAMES = "numpy dpnp cupy".split()
23
20
24
21
# color used in graphs
25
22
PKG_COLOR = {
29
26
"afcpu" : "tab:orange" ,
30
27
"afopencl" : "tab:orange" ,
31
28
"afcuda" : "tab:orange" ,
32
- "afoneapi" : "tab:orange"
29
+ "afoneapi" : "tab:orange" ,
33
30
}
34
31
35
32
# labels displayed in the graph
40
37
"afcpu" : "afcpu" ,
41
38
"afcuda" : "afcuda" ,
42
39
"afopencl" : "afopencl[opencl:gpu]" ,
43
- "afoneapi" : "afoneapi[opencl:gpu]"
40
+ "afoneapi" : "afoneapi[opencl:gpu]" ,
44
41
}
45
42
46
# ArrayFire backend package names (appended after PKG_NAMES in the graphs).
AFBACKENDS = "afcpu afcuda afopencl afoneapi".split()
52
44
53
45
# Tests to be shown in graphs
TESTS = (
    "qr neural_network gemm mandelbrot nbody pi "
    "black_scholes fft normal group_elementwise"
).split()
# Other tests, currently disabled:
# 'svd', 'cholesky', 'det', 'norm', 'uniform', 'inv'
65
+
74
66
75
67
def get_benchmark_data ():
76
68
results = {}
77
69
descriptions = {}
78
70
with open (BENCHMARKS_JSON ) as f :
79
71
js = json .load (f )
80
- for bench in js [' benchmarks' ]:
72
+ for bench in js [" benchmarks" ]:
81
73
test_name = bench ["name" ]
82
- test_name = test_name [test_name .find ('_' ) + 1 : test_name .find ('[' )]
74
+ test_name = test_name [test_name .find ("_" ) + 1 : test_name .find ("[" )]
83
75
84
76
key = bench ["param" ]
85
77
val = bench ["stats" ]["ops" ]
@@ -88,12 +80,13 @@ def get_benchmark_data():
88
80
descriptions [test_name ] = bench ["extra_info" ]["description" ]
89
81
90
82
if test_name not in results :
91
- results [test_name ] = { key : val }
83
+ results [test_name ] = {key : val }
92
84
else :
93
85
results [test_name ][key ] = val
94
86
95
87
return results , descriptions
96
88
89
+
97
90
def create_graph (test_name , test_results ):
98
91
names = []
99
92
values = []
@@ -107,12 +100,14 @@ def create_graph(test_name, test_results):
107
100
plt .savefig ("img/" + test_name + ".png" )
108
101
plt .close ()
109
102
103
+
110
104
def generate_individual_graphs():
    """Create one graph image per benchmark test found in BENCHMARKS_JSON.

    Loads the benchmark results and calls create_graph() for each test,
    which writes the figure to img/<test_name>.png.
    """
    # Descriptions are not used for the individual graphs; discard them.
    results, _ = get_benchmark_data()

    # Iterate items() directly instead of re-looking up each key.
    for test_name, test_results in results.items():
        create_graph(test_name, test_results)
115
109
110
+
116
111
# Stores the timing results in a csv file
117
112
def store_csv ():
118
113
data_dict = {}
@@ -124,9 +119,9 @@ def store_csv():
124
119
125
120
with open (BENCHMARKS_JSON ) as f :
126
121
js = json .load (f )
127
- for bench in js [' benchmarks' ]:
122
+ for bench in js [" benchmarks" ]:
128
123
test_name = bench ["name" ]
129
- test_name = test_name [test_name .find ('_' ) + 1 : test_name .find ('[' )]
124
+ test_name = test_name [test_name .find ("_" ) + 1 : test_name .find ("[" )]
130
125
131
126
pkg = bench ["param" ]
132
127
time = bench ["stats" ]["mean" ]
@@ -135,18 +130,19 @@ def store_csv():
135
130
data_dict ["Test(seconds)" ].append (test_name )
136
131
137
132
results [pkg ][test_name ] = time
138
-
133
+
139
134
for test in data_dict ["Test(seconds)" ]:
140
135
for pkg in PKG_LABELS .keys ():
141
136
if test in results [pkg ]:
142
137
data_dict [pkg ].append (results [pkg ][test ])
143
138
else :
144
139
data_dict [pkg ].append (np .nan )
145
-
140
+
146
141
df = pd .DataFrame (data_dict )
147
142
df .to_csv ("summary.csv" )
148
143
149
- def generate_group_graph (test_list = None , show_numbers = False , filename = "comparison" ):
144
+
145
+ def generate_group_graph (test_list = None , show_numbers = False , filename = "comparison" ):
150
146
results , descriptions = get_benchmark_data ()
151
147
152
148
width = 1 / (1 + len (PKG_NAMES ))
@@ -181,7 +177,7 @@ def generate_group_graph(test_list = None, show_numbers = False, filename = "com
181
177
else :
182
178
tests_values [name ].append (np .nan )
183
179
184
- fig , ax = plt .subplots (layout = ' constrained' )
180
+ fig , ax = plt .subplots (layout = " constrained" )
185
181
186
182
for name in PKG_NAMES :
187
183
offset = width * multiplier
@@ -193,21 +189,22 @@ def generate_group_graph(test_list = None, show_numbers = False, filename = "com
193
189
194
190
xlabels = []
195
191
for test in tests :
196
- xlabels .append (test + "\n " + descriptions [test ])
192
+ xlabels .append (test + "\n " + descriptions [test ])
197
193
198
- ax .set_xlabel (' Speedup' )
199
- ax .set_xscale (' log' )
200
- ax .set_title (f' Runtime Comparison\n { HARDWARE } ' )
194
+ ax .set_xlabel (" Speedup" )
195
+ ax .set_xscale (" log" )
196
+ ax .set_title (f" Runtime Comparison\n { HARDWARE } " )
201
197
ax .set_yticks (x + width , xlabels , rotation = 0 )
202
198
xmin , xmax = ax .get_xlim ()
203
199
ax .set_xlim (xmin , xmax * 2 )
204
200
205
- ax .legend (loc = ' lower right' , ncols = len (PKG_NAMES ))
201
+ ax .legend (loc = " lower right" , ncols = len (PKG_NAMES ))
206
202
fig .set_figheight (8 )
207
203
fig .set_figwidth (13 )
208
204
fig .savefig (f"img/{ filename } .png" )
209
205
plt .show ()
210
-
206
+
207
+
211
208
def main ():
212
209
store_csv ()
213
210
for backend in AFBACKENDS :
@@ -221,5 +218,6 @@ def main():
221
218
print (e )
222
219
print ("No data for" , backend )
223
220
221
+
224
222
# Script entry point: run the full graph-generation pipeline.
if __name__ == "__main__":
    main()
0 commit comments