@@ -27,6 +27,7 @@ import glob
 
 DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
 
+
 def parse_results(res, optset):
     # Parse lines like this
     # #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
@@ -58,10 +59,12 @@ def parse_results(res, optset):
             mem_test = {}
             mem_test['Data'] = [mem_testresult]
             mem_test['Info'] = {}
-            mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
+            mem_test['Name'] = "nts.swift/mem_maxrss." + \
+                optset + "." + testname + ".mem"
             tests.append(mem_test)
     return tests
 
+
 def submit_to_lnt(data, url):
     print "\nSubmitting results to LNT server..."
     json_report = {'input_data': json.dumps(data), 'commit': '1'}
@@ -75,6 +78,7 @@ def submit_to_lnt(data, url):
         print "Error:\t", response['error']
         sys.exit(1)
 
+
 def instrument_test(driver_path, test, num_samples):
     """Run a test and instrument its peak memory use"""
     test_outputs = []
@@ -113,14 +117,18 @@ def instrument_test(driver_path, test, num_samples):
 
     return avg_test_output
 
+
 def get_tests(driver_path):
     """Return a list of available performance tests"""
     return subprocess.check_output([driver_path, '--list']).split()[2:]
 
+
 def get_current_git_branch(git_repo_path):
     """Return the selected branch for the repo `git_repo_path`"""
-    return subprocess.check_output(['git', '-C', git_repo_path, 'rev-parse',
-        '--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip()
+    return subprocess.check_output(
+        ['git', '-C', git_repo_path, 'rev-parse',
+         '--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip()
+
 
 def log_results(log_directory, driver, formatted_output, swift_repo=None):
     """Log `formatted_output` to a branch specific directory in
@@ -146,6 +154,7 @@ def log_results(log_directory, driver, formatted_output, swift_repo=None):
     with open(log_file, 'w') as f:
         f.write(formatted_output)
 
+
 def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
                    log_directory=None, swift_repo=None):
     """Run perf tests individually and return results in a format that's
@@ -192,6 +201,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
         log_results(log_directory, driver, formatted_output, swift_repo)
     return formatted_output
 
+
 def submit(args):
     print "SVN revision:\t", args.revision
     print "Machine name:\t", args.machine
@@ -208,8 +218,9 @@ def submit(args):
         print "Opt level:\t", optset
         file = os.path.join(args.tests, "Benchmark_" + optset)
         try:
-            res = run_benchmarks(file, benchmarks=args.benchmark,
-                num_samples=args.iterations)
+            res = run_benchmarks(
+                file, benchmarks=args.benchmark,
+                num_samples=args.iterations)
             data['Tests'].extend(parse_results(res, optset))
         except subprocess.CalledProcessError as e:
             print "Execution failed.. Test results are empty."
@@ -227,24 +238,29 @@ def submit(args):
     submit_to_lnt(data, args.lnt_host)
     return 0
 
+
 def run(args):
     optset = args.optimization
     file = os.path.join(args.tests, "Benchmark_" + optset)
-    run_benchmarks(file, benchmarks=args.benchmarks,
-        num_samples=args.iterations, verbose=True,
-        log_directory=args.output_dir,
-        swift_repo=args.swift_repo)
+    run_benchmarks(
+        file, benchmarks=args.benchmarks,
+        num_samples=args.iterations, verbose=True,
+        log_directory=args.output_dir,
+        swift_repo=args.swift_repo)
     return 0
 
+
 def format_name(log_path):
     """Return the filename and directory for a log file"""
     return '/'.join(log_path.split('/')[-2:])
 
+
 def compare_logs(compare_script, new_log, old_log):
     """Return diff of log files at paths `new_log` and `old_log`"""
     print 'Comparing %s %s ...' % (format_name(old_log), format_name(new_log))
     subprocess.call([compare_script, old_log, new_log])
 
+
 def compare(args):
     log_dir = args.log_dir
     swift_repo = args.swift_repo
@@ -263,7 +279,8 @@ def compare(args):
     for branch_dir in [current_branch_dir, master_branch_dir]:
         for opt in ['O', 'Onone']:
             recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
-                glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
+                glob.glob(os.path.join(
+                    branch_dir, 'Benchmark_' + opt + '-*.log')),
                 key=os.path.getctime, reverse=True)
 
     if current_branch == 'master':
@@ -311,65 +328,86 @@ def compare(args):
 
     return 0
 
+
 def positive_int(value):
     ivalue = int(value)
     if not (ivalue > 0):
         raise ValueError
     return ivalue
 
+
 def main():
     parser = argparse.ArgumentParser(description='Swift benchmarks driver')
     subparsers = parser.add_subparsers()
 
-    submit_parser = subparsers.add_parser('submit',
-        help='run benchmarks and submit results to LNT')
-    submit_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' +
-        '(default: DRIVER_DIR)',
-        default=DRIVER_DIR)
-    submit_parser.add_argument('-m', '--machine', required=True,
-        help='LNT machine name')
-    submit_parser.add_argument('-r', '--revision', required=True,
-        help='SVN revision of compiler to identify the LNT run', type=int)
-    submit_parser.add_argument('-l', '--lnt_host', required=True,
-        help='LNT host to submit results to')
-    submit_parser.add_argument('-i', '--iterations',
-        help='number of times to run each test (default: 10)',
-        type=positive_int, default=10)
-    submit_parser.add_argument('-o', '--optimization', nargs='+',
-        help='optimization levels to use (default: O Onone Ounchecked)',
-        default=['O', 'Onone', 'Ounchecked'])
-    submit_parser.add_argument('benchmark',
-        help='benchmark to run (default: all)', nargs='*')
+    submit_parser = subparsers.add_parser(
+        'submit',
+        help='run benchmarks and submit results to LNT')
+    submit_parser.add_argument(
+        '-t', '--tests',
+        help='directory containing Benchmark_O{,none,unchecked} ' +
+        '(default: DRIVER_DIR)',
+        default=DRIVER_DIR)
+    submit_parser.add_argument(
+        '-m', '--machine', required=True,
+        help='LNT machine name')
+    submit_parser.add_argument(
+        '-r', '--revision', required=True,
+        help='SVN revision of compiler to identify the LNT run', type=int)
+    submit_parser.add_argument(
+        '-l', '--lnt_host', required=True,
+        help='LNT host to submit results to')
+    submit_parser.add_argument(
+        '-i', '--iterations',
+        help='number of times to run each test (default: 10)',
+        type=positive_int, default=10)
+    submit_parser.add_argument(
+        '-o', '--optimization', nargs='+',
+        help='optimization levels to use (default: O Onone Ounchecked)',
+        default=['O', 'Onone', 'Ounchecked'])
+    submit_parser.add_argument(
+        'benchmark',
+        help='benchmark to run (default: all)', nargs='*')
     submit_parser.set_defaults(func=submit)
 
-    run_parser = subparsers.add_parser('run',
-        help='run benchmarks and output results to stdout')
-    run_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' +
-        '(default: DRIVER_DIR)',
-        default=DRIVER_DIR)
-    run_parser.add_argument('-i', '--iterations',
-        help='number of times to run each test (default: 1)',
-        type=positive_int, default=1)
-    run_parser.add_argument('-o', '--optimization',
-        help='optimization level to use (default: O)', default='O')
-    run_parser.add_argument('--output-dir',
-        help='log results to directory (default: no logging)')
-    run_parser.add_argument('--swift-repo',
-        help='absolute path to Swift source repo for branch comparison')
-    run_parser.add_argument('benchmarks',
-        help='benchmark to run (default: all)', nargs='*')
+    run_parser = subparsers.add_parser(
+        'run',
+        help='run benchmarks and output results to stdout')
+    run_parser.add_argument(
+        '-t', '--tests',
+        help='directory containing Benchmark_O{,none,unchecked} ' +
+        '(default: DRIVER_DIR)',
+        default=DRIVER_DIR)
+    run_parser.add_argument(
+        '-i', '--iterations',
+        help='number of times to run each test (default: 1)',
+        type=positive_int, default=1)
+    run_parser.add_argument(
+        '-o', '--optimization',
+        help='optimization level to use (default: O)', default='O')
+    run_parser.add_argument(
+        '--output-dir',
+        help='log results to directory (default: no logging)')
+    run_parser.add_argument(
+        '--swift-repo',
+        help='absolute path to Swift source repo for branch comparison')
+    run_parser.add_argument(
+        'benchmarks',
+        help='benchmark to run (default: all)', nargs='*')
     run_parser.set_defaults(func=run)
 
-    compare_parser = subparsers.add_parser('compare',
-        help='compare benchmark results')
-    compare_parser.add_argument('--log-dir', required=True,
-        help='directory containing benchmark logs')
-    compare_parser.add_argument('--swift-repo', required=True,
-        help='absolute path to Swift source repo')
-    compare_parser.add_argument('--compare-script', required=True,
-        help='absolute path to compare script')
+    compare_parser = subparsers.add_parser(
+        'compare',
+        help='compare benchmark results')
+    compare_parser.add_argument(
+        '--log-dir', required=True,
+        help='directory containing benchmark logs')
+    compare_parser.add_argument(
+        '--swift-repo', required=True,
+        help='absolute path to Swift source repo')
+    compare_parser.add_argument(
+        '--compare-script', required=True,
+        help='absolute path to compare script')
     compare_parser.set_defaults(func=compare)
 
     args = parser.parse_args()