
Commit fb0d3f8

Initial implementation of dynamic test generation
This commit contains the initial implementation of dynamic test generation for our graphblas-opt tests. We now have a framework for writing parameterized tests that generate *.mlir test files at test-run time.
1 parent 7f3ab14 commit fb0d3f8

8 files changed: +1431 −87 lines

mlir_graphblas/src/test/CMakeLists.txt

Lines changed: 9 additions & 0 deletions

@@ -18,3 +18,12 @@ add_lit_testsuite(check-graphblas "Running the graphblas regression tests"
 set_target_properties(check-graphblas PROPERTIES FOLDER "Tests")
 
 add_lit_testsuites(GRAPHBLAS ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS ${GRAPHBLAS_TEST_DEPENDS})
+
+add_custom_target(dynamically_generated_mlir_tests
+    COMMAND python generate_tests.py
+    WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/dynamically_generate_tests"
+    COMMENT "Generate test cases."
+    VERBATIM
+)
+
+add_dependencies(check-graphblas dynamically_generated_mlir_tests)
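
Because check-graphblas now depends on dynamically_generated_mlir_tests, and CMake always treats custom targets as out of date, the generator script reruns before every lit invocation, so the generated *.mlir files always reflect the current templates rather than being checked in. The target can also be built on its own (e.g. cmake --build <build-dir> --target dynamically_generated_mlir_tests) to regenerate the files without running the suite.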

mlir_graphblas/src/test/GraphBLAS/graphblas-opt.mlir

Lines changed: 0 additions & 3 deletions
This file was deleted.

mlir_graphblas/src/test/GraphBLAS/invalid_matrix_apply.mlir

Lines changed: 0 additions & 80 deletions
@@ -7,86 +7,6 @@
     indexBitWidth = 64
 }>
 
-module {
-    func @matrix_apply_wrapper(%sparse_tensor: tensor<2x3xbf16>, %thunk: bf16) -> tensor<2x3xbf16, #CSR64> {
-        %answer = graphblas.matrix_apply %sparse_tensor, %thunk { apply_operator = "min" } : (tensor<2x3xbf16>, bf16) to tensor<2x3xbf16, #CSR64> // expected-error {{Operand #0 must be a sparse tensor.}}
-        return %answer : tensor<2x3xbf16, #CSR64>
-    }
-}
-
-// -----
-
-#CSR64 = #sparse_tensor.encoding<{
-    dimLevelType = [ "dense", "compressed" ],
-    dimOrdering = affine_map<(i,j) -> (i,j)>,
-    pointerBitWidth = 64,
-    indexBitWidth = 64
-}>
-
-module {
-    func @matrix_apply_wrapper(%sparse_tensor: tensor<2x3xbf16, #CSR64>, %thunk: bf16) -> tensor<2x3xbf16> {
-        %answer = graphblas.matrix_apply %sparse_tensor, %thunk { apply_operator = "min" } : (tensor<2x3xbf16, #CSR64>, bf16) to tensor<2x3xbf16> // expected-error {{Return value must be a sparse tensor.}}
-        return %answer : tensor<2x3xbf16>
-    }
-}
-
-// -----
-
-#CSR64 = #sparse_tensor.encoding<{
-    dimLevelType = [ "dense", "compressed" ],
-    dimOrdering = affine_map<(i,j) -> (i,j)>,
-    pointerBitWidth = 64,
-    indexBitWidth = 64
-}>
-
-module {
-    func @matrix_apply_wrapper(%sparse_tensor: tensor<2x3xi8, #CSR64>, %thunk: f16) -> tensor<2x3xf16, #CSR64> {
-        %answer = graphblas.matrix_apply %sparse_tensor, %thunk { apply_operator = "min" } : (tensor<2x3xi8, #CSR64>, f16) to tensor<2x3xf16, #CSR64> // expected-error {{Element type of input tensor does not match type of thunk.}}
-        return %answer : tensor<2x3xf16, #CSR64>
-    }
-}
-
-// -----
-
-#CSR64 = #sparse_tensor.encoding<{
-    dimLevelType = [ "dense", "compressed" ],
-    dimOrdering = affine_map<(i,j) -> (i,j)>,
-    pointerBitWidth = 64,
-    indexBitWidth = 64
-}>
-
-module {
-    func @matrix_apply_wrapper(%sparse_tensor: tensor<2x3xi8, #CSR64>, %thunk: i8) -> tensor<2x3xf16, #CSR64> {
-        %answer = graphblas.matrix_apply %sparse_tensor, %thunk { apply_operator = "min" } : (tensor<2x3xi8, #CSR64>, i8) to tensor<2x3xf16, #CSR64> // expected-error {{Element type of result tensor does not match type of thunk.}}
-        return %answer : tensor<2x3xf16, #CSR64>
-    }
-}
-
-// -----
-
-#CSR64 = #sparse_tensor.encoding<{
-    dimLevelType = [ "dense", "compressed" ],
-    dimOrdering = affine_map<(i,j) -> (i,j)>,
-    pointerBitWidth = 64,
-    indexBitWidth = 64
-}>
-
-module {
-    func @matrix_apply_wrapper(%sparse_tensor: tensor<2x3xi8, #CSR64>, %thunk: i8) -> tensor<99x99xi8, #CSR64> {
-        %answer = graphblas.matrix_apply %sparse_tensor, %thunk { apply_operator = "min" } : (tensor<2x3xi8, #CSR64>, i8) to tensor<99x99xi8, #CSR64> // expected-error {{Input shape does not match output shape.}}
-        return %answer : tensor<99x99xi8, #CSR64>
-    }
-}
-
-// -----
-
-#CSR64 = #sparse_tensor.encoding<{
-    dimLevelType = [ "dense", "compressed" ],
-    dimOrdering = affine_map<(i,j) -> (i,j)>,
-    pointerBitWidth = 64,
-    indexBitWidth = 64
-}>
-
 module {
     func @matrix_apply_wrapper(%sparse_tensor: tensor<2x3xi8, #CSR64>, %thunk: i8) -> tensor<2x3xi8, #CSR64> {
         %answer = graphblas.matrix_apply %sparse_tensor, %thunk { apply_operator = "BADOPERATOR" } : (tensor<2x3xi8, #CSR64>, i8) to tensor<2x3xi8, #CSR64> // expected-error {{"BADOPERATOR" is not a supported operator.}}

mlir_graphblas/src/test/GraphBLAS/invalid_matrix_multiply.mlir

Lines changed: 4 additions & 4 deletions
@@ -177,7 +177,7 @@ module {
 
 module {
     func @matrix_multiply_wrapper(%argA: tensor<2x3xi64, #CSR64>, %argB: tensor<3x2xi64, #CSC64>, %mask: tensor<2x2xi64>) -> tensor<2x2xi64, #CSR64> {
-        %answer = graphblas.matrix_multiply %argA, %argB, %mask{ semiring = "plus_times" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<2x2xi64>) to tensor<2x2xi64, #CSR64> // expected-error {{Operand #2 must be a sparse tensor.}}
+        %answer = graphblas.matrix_multiply %argA, %argB, %mask { semiring = "plus_times" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<2x2xi64>) to tensor<2x2xi64, #CSR64> // expected-error {{Operand #2 must be a sparse tensor.}}
         return %answer : tensor<2x2xi64, #CSR64>
     }
 }
@@ -200,7 +200,7 @@ module {
 
 module {
     func @matrix_multiply_wrapper(%argA: tensor<2x3xi64, #CSR64>, %argB: tensor<3x2xi64, #CSC64>, %mask: tensor<2x999xi64, #CSR64>) -> tensor<2x2xi64, #CSR64> {
-        %answer = graphblas.matrix_multiply %argA, %argB, %mask{ semiring = "plus_times" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<2x999xi64, #CSR64>) to tensor<2x2xi64, #CSR64> // expected-error {{Mask shape must match output shape.}}
+        %answer = graphblas.matrix_multiply %argA, %argB, %mask { semiring = "plus_times" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<2x999xi64, #CSR64>) to tensor<2x2xi64, #CSR64> // expected-error {{Mask shape must match output shape.}}
         return %answer : tensor<2x2xi64, #CSR64>
     }
 }
@@ -223,7 +223,7 @@ module {
 
 module {
     func @matrix_multiply_wrapper(%argA: tensor<2x3xi64, #CSR64>, %argB: tensor<3x2xi64, #CSC64>, %mask: tensor<999x2xi64, #CSR64>) -> tensor<2x2xi64, #CSR64> {
-        %answer = graphblas.matrix_multiply %argA, %argB, %mask{ semiring = "plus_pair" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<999x2xi64, #CSR64>) to tensor<2x2xi64, #CSR64> // expected-error {{Mask shape must match output shape.}}
+        %answer = graphblas.matrix_multiply %argA, %argB, %mask { semiring = "plus_pair" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<999x2xi64, #CSR64>) to tensor<2x2xi64, #CSR64> // expected-error {{Mask shape must match output shape.}}
         return %answer : tensor<2x2xi64, #CSR64>
     }
 }
@@ -246,7 +246,7 @@ module {
 
 module {
     func @matrix_multiply_wrapper(%argA: tensor<2x3xi64, #CSR64>, %argB: tensor<3x2xi64, #CSC64>, %mask: tensor<999x999xi64, #CSR64>) -> tensor<2x2xi64, #CSR64> {
-        %answer = graphblas.matrix_multiply %argA, %argB, %mask{ semiring = "plus_times" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<999x999xi64, #CSR64>) to tensor<2x2xi64, #CSR64> // expected-error {{Mask shape must match output shape.}}
+        %answer = graphblas.matrix_multiply %argA, %argB, %mask { semiring = "plus_times" } : (tensor<2x3xi64, #CSR64>, tensor<3x2xi64, #CSC64>, tensor<999x999xi64, #CSR64>) to tensor<2x2xi64, #CSR64> // expected-error {{Mask shape must match output shape.}}
         return %answer : tensor<2x2xi64, #CSR64>
     }
 }
mlir_graphblas/src/test/dynamically_generate_tests/generate_tests.py

Lines changed: 34 additions & 0 deletions (new file)

import inspect
import os
import pathlib
import shutil
import itertools

from test_utils import current_test_generators

# initialize test generators by importing the modules defining them
import invalid_matrix_apply_test_generation
import invalid_matrix_multiply_test_generation

if __name__ == "__main__":
    current_module_dir = pathlib.Path(__file__).parent.absolute()
    test_file_dir = os.path.join(current_module_dir, "automatically_generated_tests")
    if os.path.isdir(test_file_dir):
        shutil.rmtree(test_file_dir)
    os.makedirs(test_file_dir)
    counter = itertools.count()
    print("Generating tests...")
    # TODO this writes tons of files; we can reduce this to a few files by grabbing
    # all the content strings, grouping them by their (canonicalized) "RUN:"
    # command, and writing those to disk.
    for generator in current_test_generators():
        for content_string in generator():
            name = f"test_{next(counter)}.mlir"
            abs_name = os.path.join(test_file_dir, name)
            with open(abs_name, "w") as f:
                f.write(content_string)
        print(
            f" - Finished generating tests for {generator.original_func.__qualname__} "
            f"from {inspect.getfile(generator.original_func)}."
        )
    print(f"{next(counter)} tests generated and written to {test_file_dir}.")
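
The test_utils module and the two generator modules are among the eight changed files but are not shown above. What generate_tests.py implies about them: test_utils keeps a registry returned by current_test_generators(); importing a generator module registers its generators; each generator yields complete *.mlir file contents as strings and exposes the undecorated function as original_func. The following is a minimal sketch consistent with that protocol; the decorator name register_test_generator, the RUN line, and the template are assumptions for illustration, not code from this commit.

# --- test_utils.py (assumed implementation) ---
_GENERATORS = []

def register_test_generator(func):  # assumed decorator name, not in this diff
    def wrapper():
        yield from func()
    wrapper.original_func = func  # generate_tests.py reads this attribute
    _GENERATORS.append(wrapper)
    return wrapper

def current_test_generators():
    return list(_GENERATORS)

# --- a generator module, e.g. invalid_matrix_apply_test_generation.py ---
@register_test_generator
def generate_invalid_apply_tests():
    # Parameterize one invalid-IR case over several element types; each
    # yielded string is a complete *.mlir test file. A real template would
    # also declare #CSR64, as the hand-written tests above do.
    template = """// RUN: graphblas-opt %s -split-input-file -verify-diagnostics

module {
    func @matrix_apply_wrapper(%t: tensor<2x3xTYPE>, %thunk: TYPE) -> tensor<2x3xTYPE, #CSR64> {
        %answer = graphblas.matrix_apply %t, %thunk { apply_operator = "min" } : (tensor<2x3xTYPE>, TYPE) to tensor<2x3xTYPE, #CSR64> // expected-error {{Operand #0 must be a sparse tensor.}}
        return %answer : tensor<2x3xTYPE, #CSR64>
    }
}
"""
    for element_type in ["bf16", "f16", "i8", "i64"]:
        yield template.replace("TYPE", element_type)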
