@@ -16,12 +16,17 @@ class Benchmark {
   /// Create a benchmark with the given [name], starts measuring total run time.
   ///
   /// ```dart
-  /// await Benchmark('Name', iterations: 1, coefficient: 1 / count).report();
+  /// await Benchmark('Name', coefficient: 1 / count).report();
   /// ```
   ///
   /// Call [report] on this to await results.
   ///
-  /// Runs the [runIteration] function [iterations] times, defaults to 1.
+  /// Runs the [runIteration] function at least [iterations] times, defaults to
+  /// 1. If possible, leave this at the default. The benchmark is automatically
+  /// re-run a) to warm up until at least 100 ms have passed (e.g. to avoid
+  /// caching skewing results) and b) to measure until at least 2 seconds have
+  /// passed. This is similar to how Dart's benchmark_harness and various other
+  /// benchmarking libraries in other languages work.
   ///
   /// Set a fraction in [coefficient] to multiply the measured value of a run
   /// with, defaults to 1. Use this if a run calls a to-be-measured function
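The updated doc comment keeps `coefficient` as the way to rescale the reported value. A minimal usage sketch of that arithmetic, assuming this file's `Benchmark` class is in scope and that one iteration calls the measured function `count` times; the 1.2 ms figure in the comment is made up purely to illustrate the scaling:

```dart
Future<void> main() async {
  // One iteration calls the measured function `count` times, so the reported
  // value is the measured time per iteration multiplied by 1 / count.
  // E.g. a 1.2 ms iteration with coefficient 1 / 1000 reports ~1.2 µs per call.
  const count = 1000;
  await Benchmark('Name', coefficient: 1 / count).report();
}
```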
@@ -108,9 +113,11 @@ class Benchmark {
   /// Starts the benchmark and waits for the result.
   ///
   /// - Calls [setup], then
-  /// - repeatedly calls [run] for at least 100 ms to warm up,
-  /// - then calls [run] repeatedly for at least 2000 ms and collects the
-  ///   average elapsed time of a call (if run multiple times), then
+  /// - repeatedly calls [run] for at least 100 ms to warm up and avoid effects
+  ///   such as caching,
+  /// - then calls [run] repeatedly until at least 2000 ms have passed to ensure
+  ///   stable results and collects the average elapsed time of a call (if run
+  ///   multiple times), then
   /// - calls [teardown] and returns the result.
   @nonVirtual
   Future<void> report() async {
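The warm-up and measurement phases described in the updated doc comment follow the common benchmark_harness-style pattern. A minimal sketch of that pattern, assuming a synchronous `run` callback; it illustrates the strategy only and is not the implementation behind [report]:

```dart
/// Returns the average time per call of [run], using the strategy described
/// above: warm up for at least 100 ms, then measure for at least 2000 ms.
Duration measureAverage(void Function() run) {
  final watch = Stopwatch()..start();

  // Warm-up phase: keep calling until at least 100 ms have passed, so that
  // caches, lazy initialization, etc. do not skew the measured phase.
  while (watch.elapsedMilliseconds < 100) {
    run();
  }

  // Measurement phase: keep calling until at least 2000 ms have passed and
  // count the calls, then return the average elapsed time per call.
  watch.reset();
  var calls = 0;
  while (watch.elapsedMilliseconds < 2000) {
    run();
    calls++;
  }
  return watch.elapsed ~/ calls;
}
```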