#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ===--- run_smoke_bench -------------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
#
# Performs a very fast check of which benchmarks regressed and improved.
#
# Initially runs the benchmarks with a low sample count and re-runs only those
# benchmarks whose results differ between the two builds.
# Also reports code size differences.
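#
# Example invocation (the build-directory paths below are illustrative only):
#
#   ./run_smoke_bench -O -Osize -o report.md /path/to/old-build /path/to/new-build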
#
# ===---------------------------------------------------------------------===//

import argparse
import glob
import os
import subprocess
import sys

from imp import load_source

from compare_perf_tests import LogParser, TestComparator, create_report

# import Benchmark_Driver  # doesn't work because it misses '.py' extension
Benchmark_Driver = load_source(
    "Benchmark_Driver",
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "Benchmark_Driver"),
)
# from Benchmark_Driver import BenchmarkDriver, BenchmarkDoctor, ...
BenchmarkDriver = Benchmark_Driver.BenchmarkDriver
BenchmarkDoctor = Benchmark_Driver.BenchmarkDoctor
MarkdownReportHandler = Benchmark_Driver.MarkdownReportHandler

VERBOSE = False


class DriverArgs(object):
    """Arguments for BenchmarkDriver."""

    def __init__(self, benchmark_dir, architecture, platform, optimization="O"):
        """Initialize with path to the build-dir and optimization level."""
        self.benchmarks = None
        self.filters = None
        self.tests = os.path.join(benchmark_dir, "bin")
        self.optimization = optimization
        self.architecture = architecture
        self.libdir = os.path.join(benchmark_dir, "lib", "swift", platform)


def log(msg):
    print(msg)
    sys.stdout.flush()


def main():
    global VERBOSE
    argparser = argparse.ArgumentParser()
    argparser.add_argument(
        "-verbose", action="store_true", help="print verbose messages"
    )
    argparser.add_argument(
        "-O",
        action="append_const",
        const="O",
        dest="opt_levels",
        help="test -O benchmarks",
    )
    argparser.add_argument(
        "-Osize",
        action="append_const",
        const="Osize",
        dest="opt_levels",
        help="test -Osize benchmarks",
    )
    argparser.add_argument(
        "-Onone",
        action="append_const",
        const="Onone",
        dest="opt_levels",
        help="test -Onone benchmarks (except code size)",
    )
    argparser.add_argument(
        "-skip-code-size",
        action="store_true",
        help="Don't report code size differences",
    )
    argparser.add_argument(
        "-skip-performance",
        action="store_true",
        help="Don't report performance differences",
    )
    argparser.add_argument(
        "-skip-check-added",
        action="store_true",
        help="Don't validate newly added benchmarks",
    )
    argparser.add_argument(
        "-o",
        type=str,
        help="In addition to stdout, write the results into a markdown file",
    )
    argparser.add_argument(
        "-threshold",
        type=float,
        help="The performance threshold in %% which triggers a re-run",
        default=5,
    )
    argparser.add_argument(
        "-num-samples",
        type=int,
        help="The (minimum) number of samples to run",
        default=3,
    )
    argparser.add_argument(
        "-num-reruns",
        type=int,
        help="The number of re-runs until it's assumed to be a real change",
        default=8,
    )
    argparser.add_argument(
        "-arch",
        type=str,
        help="The architecture. The default is x86_64",
        default="x86_64",
    )
    argparser.add_argument(
        "-platform", type=str, help="The benchmark build platform", default="macosx"
    )
    argparser.add_argument(
        "oldbuilddir", nargs=1, type=str, help="old benchmark build directory"
    )
    argparser.add_argument(
        "newbuilddir", nargs=1, type=str, help="new benchmark build directory"
    )
    args = argparser.parse_args()
    VERBOSE = args.verbose

    return test_opt_levels(args)


def test_opt_levels(args):
    output_file = None
    if args.o:
        output_file = open(args.o, "w")

    changes = False
    for opt_level in args.opt_levels or ["O", "Osize", "Onone"]:
        log("####### Testing optimization level -" + opt_level + " #######")
        if not args.skip_performance:
            if test_performance(
                opt_level,
                args.oldbuilddir[0],
                args.newbuilddir[0],
                float(args.threshold) / 100,
                args.num_samples,
                args.num_reruns,
                output_file,
                args.arch,
                args.platform
            ):
                changes = True

        # There is no point in reporting code size for Onone.
        if not args.skip_code_size and opt_level != "Onone":
            if report_code_size(
                opt_level,
                args.oldbuilddir[0],
                args.newbuilddir[0],
                args.arch,
                args.platform,
                output_file,
            ):
                changes = True
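    # Report the size of the Swift library dylibs once, outside the per-level
    # loop, since this report does not depend on the benchmark optimization level.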
    if not args.skip_code_size:
        if report_code_size(
            "swiftlibs",
            args.oldbuilddir[0],
            args.newbuilddir[0],
            args.arch,
            args.platform,
            output_file,
        ):
            changes = True

    if not args.skip_check_added:
        check_added(args, output_file)

    if output_file:
        if changes:
            output_file.write(get_info_text())
        else:
            output_file.write("### No performance and code size changes")
        output_file.close()
    return 0


def measure(driver, tests, i):
    """Log and measure samples of the tests with the given driver.

    Collect increasing number of samples, depending on the iteration.
    """
    num_samples = min(i + 3, 10)
    msg = "    Iteration {0} for {1}: num samples = {2}, ".format(
        i, driver.args.tests, num_samples
    )
    msg += (
        "running all tests"
        if driver.all_tests == tests
        else "re-testing {0} tests".format(len(tests))
    )
    log(msg)
    driver.tests = tests
    return driver.run(num_samples=num_samples, sample_time=0.0025)


def merge(results, other_results):
    """Merge the other PerformanceTestResults into the first dictionary."""
    for test, result in other_results.items():
        results[test].merge(result)
    return results


def test_performance(
    opt_level, old_dir, new_dir, threshold, num_samples, num_reruns,
    output_file, arch, platform
):
    """Detect performance changes in benchmarks.

    Start fast with few samples per benchmark and gradually spend more time
    gathering more precise measurements of the change candidates.
    """
    i, unchanged_length_count = 0, 0
    old, new = [
        BenchmarkDriver(DriverArgs(dir, architecture=arch, platform=platform,
                                   optimization=opt_level))
        for dir in [old_dir, new_dir]
    ]
    results = [measure(driver, driver.tests, i) for driver in [old, new]]
    tests = TestComparator(results[0], results[1], threshold)
    changed = tests.decreased + tests.increased
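    # Re-measure only the changed benchmarks. Stop once nothing differs anymore,
    # or once num_reruns consecutive iterations fail to shrink the changed set;
    # the remaining changes are then assumed to be real.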
    while len(changed) > 0 and unchanged_length_count < num_reruns:
        i += 1
        if VERBOSE:
            log("    test again: " + str([test.name for test in changed]))
        results = [
            merge(the_results, measure(driver, [test.name for test in changed], i))
            for the_results, driver in zip(results, [old, new])
        ]
        tests = TestComparator(results[0], results[1], threshold)
        changed = tests.decreased + tests.increased
        if len(old.tests) == len(changed):
            unchanged_length_count += 1
        else:
            unchanged_length_count = 0

    log("")
    report_title = "Performance ({}): -{}".format(arch, opt_level)
    return report_results(
        report_title, None, None, threshold * 1.4, output_file, *results
    )


def report_code_size(opt_level, old_dir, new_dir, architecture, platform, output_file):
    if opt_level == "swiftlibs":
        files = glob.glob(os.path.join(old_dir, "lib", "swift", platform, "*.dylib"))
    else:
        files = glob.glob(
            os.path.join(old_dir, opt_level + "-" + architecture + "*" +
                         platform + "*", "*.o")
        )

    idx = 1
    old_lines = ""
    new_lines = ""
    for oldfile in files:
        new_dir = os.path.join(new_dir, '')
        newfile = oldfile.replace(old_dir, new_dir, 1)
        if os.path.isfile(newfile):
            oldsize = get_codesize(oldfile)
            newsize = get_codesize(newfile)
            bname = os.path.basename(oldfile)
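            # Emit one line per object file in the comma-separated log format
            # that LogParser.results_from_string() accepts: test number, name,
            # a single "sample", and the file size filled into the statistics
            # columns (the 0 presumably standing in for the standard deviation).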
            def result_line(value):
                v = "," + str(value)
                return str(idx) + "," + bname + ",1" + (v * 3) + ",0" + v + "\n"

            old_lines += result_line(oldsize)
            new_lines += result_line(newsize)
            idx += 1

    return report_results(
        "Code size: -" + opt_level, old_lines, new_lines, 0.01, output_file
    )


def get_codesize(filename):
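    # Parse the output of the 'size' tool: a header line followed by one data
    # line. The first (tab-separated) column is the __TEXT segment size, which
    # is what this script reports as code size.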
    output = subprocess.check_output(["size", filename])
    lines = output.decode('utf-8').splitlines()
    header_line = lines[0]
    data_line = lines[1]
    if header_line.find("__TEXT") != 0:
        sys.exit("unexpected output from size command:\n" + "\n".join(lines))
    return int(data_line.split("\t")[0])


def report_results(
    title,
    old_lines,
    new_lines,
    threshold,
    output_file,
    old_results=None,
    new_results=None,
):
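    # Accepts either raw benchmark log text (old_lines/new_lines) or already
    # parsed results (old_results/new_results); parsed results take precedence.
    # Returns True if anything was written to the markdown output file.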
    old_results = old_results or LogParser.results_from_string(old_lines)
    new_results = new_results or LogParser.results_from_string(new_lines)
    print("------- " + title + " -------")
    print(create_report(old_results, new_results, threshold, "git"))
    if output_file:
        report = create_report(old_results, new_results, threshold, "markdown")
        if report != "":
            output_file.write("### " + title + "\n")
            output_file.write(report)
            output_file.write("\n")
            return True
    return False


def get_info_text():
    text = """
<details>
 <summary><strong>How to read the data</strong></summary>

The tables contain differences in performance which are larger than 8% and
differences in code size which are larger than 1%.

If you see any unexpected regressions, you should consider fixing the
regressions before you merge the PR.

**Noise**: Sometimes the performance results (not code size!) contain false
alarms. Unexpected regressions which are marked with '(?)' are probably noise.
If you see regressions which you cannot explain you can try to run the
benchmarks again. If regressions still show up, please consult with the
performance team (@eeckstein).

</details>

<details>
 <summary><strong>Hardware Overview</strong></summary>
"""
    output = subprocess.check_output(["system_profiler", "SPHardwareDataType"])
    po = output.decode('utf-8')
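    # Keep only the hardware summary lines of interest from the profiler output.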
    for line in po.splitlines():
        selection = [
            "Model Name",
            "Model Identifier",
            "Processor Name",
            "Processor Speed",
            "Number of Processors",
            "Total Number of Cores",
            "L2 Cache",
            "L3 Cache",
            "Memory:",
        ]
        if any(s in line for s in selection):
            text += line + "\n"

    text += """
</details>"""
    return text


def check_added(args, output_file=None):
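    # Benchmarks that exist only in the new build are treated as newly added
    # and are run through BenchmarkDoctor's validation checks.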
    old = BenchmarkDriver(DriverArgs(args.oldbuilddir[0], architecture=args.arch,
                                     platform=args.platform))
    new = BenchmarkDriver(DriverArgs(args.newbuilddir[0], architecture=args.arch,
                                     platform=args.platform))
    added = set(new.tests).difference(set(old.tests))
    new.tests = list(added)
    doctor = BenchmarkDoctor(args, driver=new)
    if added and output_file:
        doctor.log.addHandler(MarkdownReportHandler(output_file))
    doctor.check()


if __name__ == "__main__":
    sys.exit(main())