# Add warnings about too few or too many samples #210

This pull request reworks pyperf's stability checks: the sample-count warnings now call `bench.required_nprocesses()` instead of `bench.required_nsamples()`, and the "run more times than necessary" warning is gated behind a new `check_too_many_processes` flag so that it is only emitted by the `pyperf check` command (wired through a new `only_checks` argument to `format_benchmark()`).
```diff
@@ -400,7 +400,7 @@ def value_bucket(value):
     return lines


-def format_checks(bench, lines=None):
+def format_checks(bench, lines=None, check_too_many_processes=False):
     if lines is None:
         lines = []

```
```diff
@@ -413,7 +413,7 @@ def format_checks(bench, lines=None):
     warnings = []
     warn = warnings.append

-    required_nsamples = bench.required_nsamples()
+    required_nprocesses = bench.required_nprocesses()

     # Display a warning if the standard deviation is greater than 10%
     # of the mean
```
```diff
@@ -426,8 +426,8 @@ def format_checks(bench, lines=None):
     else:
         # display a warning if the number of samples isn't enough to get a stable result
         if (
-            required_nsamples is not None and
-            required_nsamples > len(bench._runs)
+            required_nprocesses is not None and
+            required_nprocesses > len(bench._runs)
         ):
             warn("Not enough samples to get a stable result (95% certainly of less than 1% variation)")

```
```diff
@@ -467,13 +467,14 @@ def format_checks(bench, lines=None):
         lines.append("Use --quiet option to hide these warnings.")

     if (
-        required_nsamples is not None and
-        required_nsamples < len(bench._runs) * 0.75
+        check_too_many_processes and
+        required_nprocesses is not None and
+        required_nprocesses < len(bench._runs) * 0.75
     ):
         lines.append("Benchmark was run more times than necessary to get a stable result.")
         lines.append(
             "Consider passing processes=%d to the Runner constructor to save time." %
-            required_nsamples
+            required_nprocesses
         )

     # Warn if nohz_full+intel_pstate combo if found in cpu_config metadata
```

**Review comment:** This warning may be a little bit annoying. Maybe only show it in the "pyperf check" command? https://pyperf.readthedocs.io/en/latest/cli.html#check-cmd

**Reply:** Yeah, that's a good idea. We can run …
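As a rough illustration of what the review thread suggests, only the `check` command would opt in to the extra warning. This is a hedged sketch, not pyperf's actual CLI code: `cmd_check` and `cmd_show` are made-up names, and `format_benchmark()` is assumed to return its list of output lines.

```python
# Hypothetical sketch of the CLI wiring suggested in the review thread.
# cmd_check / cmd_show are illustrative names, not pyperf's real functions.

def cmd_check(bench):
    # "pyperf check" is the one caller that opts in, so the
    # "too many processes" warning is only shown here.
    lines = format_benchmark(bench, result=False, only_checks=True)
    print('\n'.join(lines))


def cmd_show(bench):
    # "pyperf show" leaves only_checks at its False default, so
    # format_checks() skips the extra warning and stays quiet.
    lines = format_benchmark(bench)
    print('\n'.join(lines))
```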
```diff
@@ -568,7 +569,7 @@ def format_result(bench):

 def format_benchmark(bench, checks=True, metadata=False,
                      dump=False, stats=False, hist=False, show_name=False,
-                     result=True, display_runs_args=None):
+                     result=True, display_runs_args=None, only_checks=False):
     lines = []

     if metadata:
```
```diff
@@ -587,7 +588,7 @@ def format_benchmark(bench, checks=True, metadata=False,
         format_stats(bench, lines=lines)

     if checks:
-        format_checks(bench, lines=lines)
+        format_checks(bench, lines=lines, check_too_many_processes=only_checks)

     if result:
         empty_line(lines)
```
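For a concrete sense of when the new warning triggers, here is the 0.75 threshold from the diff worked through with illustrative numbers. Only the 7 matches the TELCO fixture used in the tests below; the run count is an assumption.

```python
# Illustrative numbers only: 25 runs is an assumption, not TELCO's real count.
required_nprocesses = 7    # what bench.required_nprocesses() might return
nruns = 25                 # stand-in for len(bench._runs)

# 7 < 25 * 0.75 == 18.75, so the check fires and the output suggests
# "Consider passing processes=7 to the Runner constructor to save time."
assert required_nprocesses < nruns * 0.75
```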
The second changed file updates the test suite to match: the "too many processes" lines move out of the `hist`, `show`, and `stats` expectations and into a dedicated `check` test.
```diff
@@ -478,16 +478,11 @@ def test_hist(self):
 22.8 ms: 3 ##############
 22.9 ms: 4 ###################
 22.9 ms: 4 ###################
-Benchmark was run more times than necessary to get a stable result.
-Consider passing processes=7 to the Runner constructor to save time.
 """)
         self.check_command(expected, 'hist', TELCO, env=env)

     def test_show(self):
         expected = ("""
-Benchmark was run more times than necessary to get a stable result.
-Consider passing processes=7 to the Runner constructor to save time.
-
 Mean +- std dev: 22.5 ms +- 0.2 ms
 """)
         self.check_command(expected, 'show', TELCO)
```
```diff
@@ -523,8 +518,6 @@ def test_stats(self):
 100th percentile: 22.9 ms (+2% of the mean) -- maximum

 Number of outlier (out of 22.0 ms..23.0 ms): 0
-Benchmark was run more times than necessary to get a stable result.
-Consider passing processes=7 to the Runner constructor to save time.
 """)
         self.check_command(expected, 'stats', TELCO)

```
```diff
@@ -635,6 +628,14 @@ def test_slowest(self):

     def test_check_stable(self):
         stdout = self.run_command('check', TELCO)
+        self.assertTrue(
+            textwrap.dedent(
+                """
+                Benchmark was run more times than necessary to get a stable result.
+                Consider passing processes=7 to the Runner constructor to save time.
+                """
+            ).strip() in stdout.rstrip()
+        )
         self.assertTrue(
             'The benchmark seems to be stable' in
             stdout.rstrip()
```

**Review comment:** I suggest using `assertIn()` instead.
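A minimal sketch of the reviewer's suggestion, reusing the test's own `run_command` helper and `TELCO` fixture: unittest's `assertIn()` reports both the expected substring and the actual output on failure, whereas `assertTrue()` on a containment check only says the expression was falsy.

```python
import textwrap

def test_check_stable(self):
    stdout = self.run_command('check', TELCO)
    # assertIn prints the needle and the haystack when it fails,
    # which makes a broken expectation much easier to diagnose.
    self.assertIn(
        textwrap.dedent(
            """
            Benchmark was run more times than necessary to get a stable result.
            Consider passing processes=7 to the Runner constructor to save time.
            """
        ).strip(),
        stdout,
    )
    self.assertIn('The benchmark seems to be stable', stdout)
```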