@@ -30,11 +30,11 @@ DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
 def parse_results(res, optset):
     # Parse lines like this
     # #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
-    SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," + \
-                         ",".join([r"[ \t]*([\d.]+)"]*7))
+    SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
+                         ",".join([r"[ \t]*([\d.]+)"] * 7))
     # The Totals line would be parsed like this.
-    TOTALRE = re.compile(r"()(Totals)," + \
-                         ",".join([r"[ \t]*([\d.]+)"]*7))
+    TOTALRE = re.compile(r"()(Totals)," +
+                         ",".join([r"[ \t]*([\d.]+)"] * 7))
     KEYGROUP = 2
     VALGROUP = 4
     MEMGROUP = 9
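As a quick aside (illustrative only, not part of the patch): the rewritten SCORERE captures the test name plus the seven numeric columns named in the header comment, so the group constants above line up as shown below. The sample results row is made up.

import re

SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," + ",".join([r"[ \t]*([\d.]+)"] * 7))
# Hypothetical results row in the format described by the header comment.
m = SCORERE.match("34,ArrayAppend,20,23641,29000,24990,0,24990,11059200")
print m.group(2)  # 'ArrayAppend'   -> KEYGROUP
print m.group(4)  # '23641'         -> VALGROUP (the MIN column)
print m.group(9)  # '11059200'      -> MEMGROUP (the PEAK_MEMORY column)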
@@ -51,14 +51,14 @@ def parse_results(res, optset):
         test = {}
         test['Data'] = [testresult]
         test['Info'] = {}
-        test['Name'] = "nts.swift/" + optset + "." + testname + ".exec"
+        test['Name'] = "nts.swift/" + optset + "." + testname + ".exec"
         tests.append(test)
         if testname != 'Totals':
             mem_testresult = int(m.group(MEMGROUP))
             mem_test = {}
             mem_test['Data'] = [mem_testresult]
             mem_test['Info'] = {}
-            mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
+            mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
             tests.append(mem_test)
     return tests

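For orientation (a sketch under assumed values, not taken from the commit): with optset 'O' and a test named 'ArrayAppend', the two dictionaries built in this hunk would come out roughly like this; the numbers are placeholders.

test = {'Data': [23641], 'Info': {},
        'Name': 'nts.swift/O.ArrayAppend.exec'}
mem_test = {'Data': [11059200], 'Info': {},
            'Name': 'nts.swift/mem_maxrss.O.ArrayAppend.mem'}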
@@ -85,7 +85,7 @@ def instrument_test(driver_path, test, num_samples):
         )
         peak_memory = re.match('\s*(\d+)\s*maximum resident set size',
                                test_output_raw.split('\n')[-15]).group(1)
-        test_outputs.append(test_output_raw.split()[1].split(',') + \
+        test_outputs.append(test_output_raw.split()[1].split(',') +
                             [peak_memory])

     # Average sample results
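A hedged illustration (not part of the diff): the re.match above pulls the resident-set-size figure out of a "maximum resident set size" line in the captured output. The sample line below is invented, and the [-15] offset into the real output is taken on faith from the surrounding code.

import re

sample_line = '  11059200  maximum resident set size'  # made-up time(1)-style line
peak = re.match(r'\s*(\d+)\s*maximum resident set size', sample_line).group(1)
print peak  # '11059200'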
@@ -102,7 +102,7 @@ def instrument_test(driver_path, test, num_samples):
         for i in range(AVG_START_INDEX, len(test_output)):
             avg_test_output[i] += int(test_output[i])
     for i in range(AVG_START_INDEX, len(avg_test_output)):
-        avg_test_output[i] = int(round(avg_test_output[i] / \
+        avg_test_output[i] = int(round(avg_test_output[i] /
                                        float(len(test_outputs))))
     avg_test_output[NUM_SAMPLES_INDEX] = num_samples
     avg_test_output[MIN_INDEX] = min(test_outputs,
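One note on the wrapped division (an aside, not from the commit): under the Python 2 this driver targets, `/` between two ints truncates, so the float() cast keeps the per-column average exact before rounding. A tiny made-up example:

column_total = 23      # hypothetical sum of one column across samples
sample_count = 3
print column_total / sample_count                     # 7, truncating integer division
print int(round(column_total / float(sample_count)))  # 8, what the driver computes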
@@ -152,8 +152,8 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     only run tests included in it."""
     (total_tests, total_min, total_max, total_mean) = (0, 0, 0, 0)
     output = []
-    headings = ['#', 'TEST','SAMPLES','MIN(μs)','MAX(μs)','MEAN(μs)', 'SD (μs)',
-                'MEDIAN(μs)','MAX_RSS(B)']
+    headings = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)', 'MEAN(μs)',
+                'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
     line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
     if verbose and log_directory:
         print line_format.format(*headings)
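For context (illustrative, using only values visible in this hunk): the nine headings map one-to-one onto the nine fields of line_format, so the header prints in the same columns as every result row.

# -*- coding: utf-8 -*-
headings = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)', 'MEAN(μs)',
            'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
print line_format.format(*headings)  # one aligned header row, nine columns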
@@ -182,7 +182,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     totals_output = '\n\n' + ','.join(totals)
     if verbose:
         if log_directory:
-            print line_format.format(*(['']+ totals))
+            print line_format.format(*([''] + totals))
         else:
             print totals_output[1:]
     formatted_output += totals_output
@@ -204,7 +204,7 @@ def submit(args):
     print "\nRunning benchmarks..."
     for optset in args.optimization:
         print "Opt level:\t", optset
-        file = os.path.join(args.tests, "Benchmark_" + optset)
+        file = os.path.join(args.tests, "Benchmark_" + optset)
         try:
             res = run_benchmarks(file, benchmarks=args.benchmark,
                                  num_samples=args.iterations)
@@ -227,7 +227,7 @@ def submit(args):

 def run(args):
     optset = args.optimization
-    file = os.path.join(args.tests, "Benchmark_" + optset)
+    file = os.path.join(args.tests, "Benchmark_" + optset)
     run_benchmarks(file, benchmarks=args.benchmarks,
                    num_samples=args.iterations, verbose=True,
                    log_directory=args.output_dir,
@@ -260,10 +260,9 @@ def compare(args):
     recent_logs = {}
     for branch_dir in [current_branch_dir, master_branch_dir]:
         for opt in ['O', 'Onone']:
-            recent_logs[os.path.basename(branch_dir) + '_' + opt] = \
-                sorted(glob.glob(os.path.join(branch_dir,
-                'Benchmark_' + opt + '-*.log')), key=os.path.getctime,
-                reverse=True)
+            recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
+                glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
+                key=os.path.getctime, reverse=True)

     if current_branch == 'master':
         if len(recent_logs['master_O']) > 1 and \
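A small sketch of what the reshaped expression in this hunk does (the directory name and file pattern here are hypothetical): sorting the matching logs by creation time, newest first, puts the most recent run at index 0.

import glob
import os

branch_dir = '/tmp/benchmarks/master'  # hypothetical log directory
logs = sorted(glob.glob(os.path.join(branch_dir, 'Benchmark_O-*.log')),
              key=os.path.getctime, reverse=True)
if logs:
    print logs[0]  # most recently created Benchmark_O log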
@@ -323,7 +322,7 @@ def main():
     submit_parser = subparsers.add_parser('submit',
         help='run benchmarks and submit results to LNT')
     submit_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' + \
+        help='directory containing Benchmark_O{,none,unchecked} ' +
         '(default: DRIVER_DIR)',
         default=DRIVER_DIR)
     submit_parser.add_argument('-m', '--machine', required=True,
@@ -345,7 +344,7 @@ def main():
     run_parser = subparsers.add_parser('run',
         help='run benchmarks and output results to stdout')
     run_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' + \
+        help='directory containing Benchmark_O{,none,unchecked} ' +
         '(default: DRIVER_DIR)',
         default=DRIVER_DIR)
     run_parser.add_argument('-i', '--iterations',