@@ -236,7 +236,7 @@ def has_low_variance(durations, durations_len):
 
 class BenchRunner(object):
     def __init__(self, bench_file, bench_args=None, iterations=1, warmup=-1, warmup_runs=0, startup=None,
-                 live_results=False):
+                 live_results=False, self_measurement=False):
         assert isinstance(iterations, int), \
             "BenchRunner iterations argument must be an int, got %s instead" % iterations
         assert isinstance(warmup, int), \
@@ -256,6 +256,7 @@ def __init__(self, bench_file, bench_args=None, iterations=1, warmup=-1, warmup_
         self.warmup = warmup if warmup > 0 else -1
         self.startup = startup
         self.live_results = live_results
+        self.self_measurement = self_measurement
 
     @staticmethod
     def get_bench_module(bench_file):
@@ -325,6 +326,7 @@ def report_iteration(iteration, duration):
                                            duration_str))
 
         report_startup = bool(self.startup)
+        benchmark_returns_duration = self.self_measurement
 
         cleanup = False
         cleanup_attr = self._get_attr(ATTR_CLEANUP)
@@ -354,9 +356,9 @@ def report_iteration(iteration, duration):
             start = monotonic_best_accuracy()
             result = bench_func(*args)
             cur_time = monotonic_best_accuracy()
-            duration = cur_time - start
+            duration = cur_time - start if not benchmark_returns_duration else result
             timestamps[durations_len] = cur_time
-            durations[durations_len] = cur_time - start
+            durations[durations_len] = duration
             durations_len += 1
             if live_report:
                 report_iteration(iteration, duration)
@@ -436,6 +438,7 @@ def run_benchmark(args):
     bench_args = []
     paths = []
     live_results = False
+    self_measurement = False
 
     i = 0
     while i < len(args):
@@ -472,12 +475,17 @@ def run_benchmark(args):
             paths = arg.split("=")[1].split(",")
         elif arg == "--live-results":
             live_results = True
+        elif arg == "--self-measurement":
+            self_measurement = True
 
         elif bench_file is None:
             bench_file = arg
         else:
             bench_args.append(arg)
         i += 1
+
+    if startup and self_measurement:
+        raise RuntimeError("It is not allowed to use the startup argument when self_measurement is enabled")
 
     min_required_iterations = max(startup) if startup else 0
     if startup and iterations < min_required_iterations:
@@ -497,7 +505,7 @@ def run_benchmark(args):
     if GRAALPYTHON:
         print(f"### using bytecode DSL interpreter: {__graalpython__.is_bytecode_dsl_interpreter}")
 
-    BenchRunner(bench_file, bench_args=bench_args, iterations=iterations, warmup=warmup, warmup_runs=warmup_runs, startup=startup, live_results=live_results).run()
+    BenchRunner(bench_file, bench_args=bench_args, iterations=iterations, warmup=warmup, warmup_runs=warmup_runs, startup=startup, live_results=live_results, self_measurement=self_measurement).run()
 
 
 if __name__ == '__main__':