
Commit a7cbdb4

Bring new Python code in line with subset of PEP 8 used in project.
1 parent 6f77014 commit a7cbdb4
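
The conventions touched here are a small, recurring set: implicit line continuation inside brackets instead of trailing backslashes, spaces around binary operators and after commas, `x not in y` rather than `not x in y`, and compound statements split onto separate lines. A rough before/after sketch (the names are invented for illustration, not taken from these scripts):

# Hypothetical example of the conventions this commit applies;
# "opt_level", "name" and "seen" are made up for this sketch.
opt_level, name, seen = 'O', 'Ackermann', {}

# Old style (kept as comments so the sketch stays runnable):
#   label = "Benchmark_"+opt_level+"."+name + \
#           ".exec"
#   if not name in seen: print label

# Style after this commit:
label = ("Benchmark_" + opt_level + "." + name +
         ".exec")
if name not in seen:
    print(label)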

File tree: 7 files changed (+67, -85 lines)

benchmark/scripts/Benchmark_Driver (+18 -19)

@@ -30,11 +30,11 @@ DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
 def parse_results(res, optset):
     # Parse lines like this
     # #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
-    SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," + \
-                         ",".join([r"[ \t]*([\d.]+)"]*7))
+    SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
+                         ",".join([r"[ \t]*([\d.]+)"] * 7))
     # The Totals line would be parsed like this.
-    TOTALRE = re.compile(r"()(Totals)," + \
-                         ",".join([r"[ \t]*([\d.]+)"]*7))
+    TOTALRE = re.compile(r"()(Totals)," +
+                         ",".join([r"[ \t]*([\d.]+)"] * 7))
     KEYGROUP = 2
     VALGROUP = 4
     MEMGROUP = 9
@@ -51,14 +51,14 @@ def parse_results(res, optset):
         test = {}
         test['Data'] = [testresult]
         test['Info'] = {}
-        test['Name'] = "nts.swift/"+optset+"."+testname+".exec"
+        test['Name'] = "nts.swift/" + optset + "." + testname + ".exec"
         tests.append(test)
         if testname != 'Totals':
            mem_testresult = int(m.group(MEMGROUP))
            mem_test = {}
            mem_test['Data'] = [mem_testresult]
            mem_test['Info'] = {}
-           mem_test['Name'] = "nts.swift/mem_maxrss."+optset+"."+testname+".mem"
+           mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
            tests.append(mem_test)
     return tests
 
@@ -85,7 +85,7 @@ def instrument_test(driver_path, test, num_samples):
         )
         peak_memory = re.match('\s*(\d+)\s*maximum resident set size',
                                test_output_raw.split('\n')[-15]).group(1)
-        test_outputs.append(test_output_raw.split()[1].split(',') + \
+        test_outputs.append(test_output_raw.split()[1].split(',') +
                             [peak_memory])
 
     # Average sample results
@@ -102,7 +102,7 @@ def instrument_test(driver_path, test, num_samples):
         for i in range(AVG_START_INDEX, len(test_output)):
             avg_test_output[i] += int(test_output[i])
     for i in range(AVG_START_INDEX, len(avg_test_output)):
-        avg_test_output[i] = int(round(avg_test_output[i] / \
+        avg_test_output[i] = int(round(avg_test_output[i] /
                                        float(len(test_outputs))))
     avg_test_output[NUM_SAMPLES_INDEX] = num_samples
     avg_test_output[MIN_INDEX] = min(test_outputs,
@@ -152,8 +152,8 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     only run tests included in it."""
     (total_tests, total_min, total_max, total_mean) = (0, 0, 0, 0)
     output = []
-    headings = ['#', 'TEST','SAMPLES','MIN(μs)','MAX(μs)','MEAN(μs)','SD(μs)',
-                'MEDIAN(μs)','MAX_RSS(B)']
+    headings = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)', 'MEAN(μs)',
+                'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
     line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
     if verbose and log_directory:
         print line_format.format(*headings)
@@ -182,7 +182,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     totals_output = '\n\n' + ','.join(totals)
     if verbose:
         if log_directory:
-            print line_format.format(*(['']+totals))
+            print line_format.format(*([''] + totals))
         else:
             print totals_output[1:]
     formatted_output += totals_output
@@ -204,7 +204,7 @@ def submit(args):
     print "\nRunning benchmarks..."
     for optset in args.optimization:
         print "Opt level:\t", optset
-        file = os.path.join(args.tests, "Benchmark_"+optset)
+        file = os.path.join(args.tests, "Benchmark_" + optset)
         try:
             res = run_benchmarks(file, benchmarks=args.benchmark,
                                  num_samples=args.iterations)
@@ -227,7 +227,7 @@ def submit(args):
 
 def run(args):
     optset = args.optimization
-    file = os.path.join(args.tests, "Benchmark_"+optset)
+    file = os.path.join(args.tests, "Benchmark_" + optset)
     run_benchmarks(file, benchmarks=args.benchmarks,
                    num_samples=args.iterations, verbose=True,
                    log_directory=args.output_dir,
@@ -260,10 +260,9 @@ def compare(args):
     recent_logs = {}
     for branch_dir in [current_branch_dir, master_branch_dir]:
         for opt in ['O', 'Onone']:
-            recent_logs[os.path.basename(branch_dir) + '_' + opt] = \
-                sorted(glob.glob(os.path.join(branch_dir,
-                'Benchmark_' + opt + '-*.log')), key=os.path.getctime,
-                reverse=True)
+            recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
+                glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
+                key=os.path.getctime, reverse=True)
 
     if current_branch == 'master':
         if len(recent_logs['master_O']) > 1 and \
@@ -323,7 +322,7 @@ def main():
     submit_parser = subparsers.add_parser('submit',
         help='run benchmarks and submit results to LNT')
     submit_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' + \
+        help='directory containing Benchmark_O{,none,unchecked} ' +
         '(default: DRIVER_DIR)',
         default=DRIVER_DIR)
     submit_parser.add_argument('-m', '--machine', required=True,
@@ -345,7 +344,7 @@ def main():
     run_parser = subparsers.add_parser('run',
         help='run benchmarks and output results to stdout')
     run_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' + \
+        help='directory containing Benchmark_O{,none,unchecked} ' +
         '(default: DRIVER_DIR)',
         default=DRIVER_DIR)
     run_parser.add_argument('-i', '--iterations',
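
Most of the edits in this file drop a trailing backslash where the expression already sits inside parentheses, since Python continues such lines implicitly. The snippet below reproduces the post-change SCORERE expression from the first hunk; the sample line is invented so the sketch runs on its own:

import re

# Post-change form from the hunk above: the concatenation lives inside
# re.compile(...)'s parentheses, so no trailing backslash is needed.
SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
                     ",".join([r"[ \t]*([\d.]+)"] * 7))

# A made-up results line in the #,TEST,SAMPLES,MIN,... format the driver parses.
m = SCORERE.match("1,Ackermann,20,103,120,110,5,109,10485760")
print(m.group(2))  # KEYGROUP (test name)  -> Ackermann
print(m.group(4))  # VALGROUP (MIN column) -> 103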

benchmark/scripts/Benchmark_GuardMalloc.in (+2 -4)

@@ -44,9 +44,8 @@ class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):
         print "Running {}...".format(test_name)
         sys.stdout.flush()
         status = subprocess.call([data['path'], data['test_name'], '--num-iters=2'],
-                                 env=data['env'],
-                                 stderr=open('/dev/null', 'w'),
-                                 stdout=open('/dev/null', 'w'))
+                                 env=data['env'], stderr=open('/dev/null', 'w'),
+                                 stdout=open('/dev/null', 'w'))
         return GuardMallocResult(test_name, status)
 
 SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -57,4 +56,3 @@ if __name__ == "__main__":
         sys.exit(0)
     else:
         sys.exit(-1)
-

benchmark/scripts/Benchmark_RuntimeLeaksRunner.in (+2 -3)

@@ -13,7 +13,6 @@
 # ===----------------------------------------------------------------------===//
 
 import os
-import re
 import sys
 import json
 import subprocess
@@ -64,6 +63,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
     def __init__(self, binary, xfail_list):
         perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
                                                   enable_parallel=True)
+
     def prepare_input(self, name):
         return {}
 
@@ -75,8 +75,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
            p = subprocess.Popen([data['path'], "--run-all", "--num-samples=2",
                                  "--num-iters={}".format(2), data['test_name']],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-           status = p.wait()
-           output = p.stdout.readlines()
+           p.wait()
            error_out = p.stderr.readlines()
         except:
            print("Child Process Failed! (%s,%s)" % (data['path'], data['test_name']))

benchmark/scripts/compare_perf_tests.py (+34 -26)

@@ -19,27 +19,26 @@
 # compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | column -s, -t
 
 import sys
-import os
 import re
 
-VERBOSE=0
+VERBOSE = 0
 
 # #,TEST,SAMPLES,MIN(ms),MAX(ms),MEAN(ms),SD(ms),MEDIAN(ms)
-SCORERE=re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
-TOTALRE=re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
-KEYGROUP=2
-VALGROUP=4
-NUMGROUP=1
+SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+TOTALRE = re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+KEYGROUP = 2
+VALGROUP = 4
+NUMGROUP = 1
 
-IsTime=1
-ShowSpeedup=1
-PrintAllScores=0
+IsTime = 1
+ShowSpeedup = 1
+PrintAllScores = 0
 
 def parseInt(word):
     try:
         return int(word)
     except:
-        raise ScoreParserException("Expected integer value, not "+word)
+        raise Exception("Expected integer value, not " + word)
 
 def getScores(fname):
     scores = {}
@@ -48,7 +47,8 @@ def getScores(fname):
     f = open(fname)
     try:
         for line in f:
-            if VERBOSE: print "Parsing", line,
+            if VERBOSE:
+                print "Parsing", line,
             m = SCORERE.match(line)
             is_total = False
             if not m:
@@ -57,7 +57,8 @@ def getScores(fname):
             if not m:
                 continue
 
-            if VERBOSE: print " match", m.group(KEYGROUP), m.group(VALGROUP)
+            if VERBOSE:
+                print " match", m.group(KEYGROUP), m.group(VALGROUP)
 
             if not m.group(KEYGROUP) in scores:
                 scores[m.group(KEYGROUP)] = []
@@ -90,31 +91,34 @@ def compareScores(key, score1, score2, runs, num):
             bestscore1 = score
         if isMaxScore(newscore=score, maxscore=worstscore1, invert=minworst):
             worstscore1 = score
-        if PrintAllScores: print ("%d" % score).rjust(16),
+        if PrintAllScores:
+            print ("%d" % score).rjust(16),
     for score in score2:
         if isMaxScore(newscore=score, maxscore=bestscore2, invert=minbest):
             bestscore2 = score
         if isMaxScore(newscore=score, maxscore=worstscore2, invert=minworst):
             worstscore2 = score
-        if PrintAllScores: print ("%d" % score).rjust(16),
+        if PrintAllScores:
+            print ("%d" % score).rjust(16),
         r += 1
     while r < runs:
-        if PrintAllScores: print ("0").rjust(9),
+        if PrintAllScores:
+            print ("0").rjust(9),
         r += 1
 
     if not PrintAllScores:
         print ("%d" % bestscore1).rjust(16),
         print ("%d" % bestscore2).rjust(16),
 
-    print ("%+d" % (bestscore2-bestscore1)).rjust(9),
+    print ("%+d" % (bestscore2 - bestscore1)).rjust(9),
 
     if bestscore1 != 0 and bestscore2 != 0:
-        print ("%+.1f%%"%(((float(bestscore2)/bestscore1)-1)*100)).rjust(9),
+        print ("%+.1f%%" % (((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9),
         if ShowSpeedup:
             Num, Den = float(bestscore2), float(bestscore1)
             if IsTime:
                 Num, Den = Den, Num
-            print ("%.2fx"%(Num/Den)).rjust(9),
+            print ("%.2fx" % (Num / Den)).rjust(9),
     else:
         print "*".rjust(9),
         if ShowSpeedup:
@@ -165,32 +169,36 @@ def usage():
 if runs2 > runs:
     runs = runs2
 
-if VERBOSE: print scores1; print scores2
+if VERBOSE:
+    print scores1
+    print scores2
 
 keys = [f for f in set(scores1.keys() + scores2.keys())]
 keys.sort()
 if VERBOSE:
     print "comparing ", file1, "vs", file2, "=",
-    if IsTime: print file1, "/", file2
-    else: print file2, "/", file1
+    if IsTime:
+        print file1, "/", file2
+    else:
+        print file2, "/", file1
 
 print "#".rjust(3),
 print "TEST".ljust(25),
 if PrintAllScores:
-    for i in range(0,runs):
+    for i in range(0, runs):
         print ("OLD_RUN%d" % i).rjust(9),
-    for i in range(0,runs):
+    for i in range(0, runs):
         print ("NEW_RUN%d" % i).rjust(9),
 else:
     print "BEST_OLD_MIN(μs)".rjust(17),
     print "BEST_NEW_MIN(μs)".rjust(17),
 print 'DELTA'.rjust(9), '%DELTA'.rjust(9), 'SPEEDUP'.rjust(9)
 
 for key in keys:
-    if not key in scores1:
+    if key not in scores1:
         print key, "not in", file1
         continue
-    if not key in scores2:
+    if key not in scores2:
         print key, "not in", file2
         continue
     compareScores(key, scores1[key], scores2[key], runs, nums[key])
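
Two of the idioms adopted above, as a small self-contained sketch (the dictionary and key are invented for illustration):

VERBOSE = 1
scores1 = {'Ackermann': [103, 105]}
key = 'Walsh'

# PEP 8 prefers the single "not in" operator over negating a membership test.
if key not in scores1:
    print("%s not in old results" % key)

# Compound statements such as "if VERBOSE: print ..." get their body on an
# indented line of its own.
if VERBOSE:
    print("done")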

benchmark/scripts/generate_harness/generate_harness.py (+6 -6)

@@ -32,7 +32,7 @@
 
 template_loader = jinja2.FileSystemLoader(searchpath="/")
 template_env = jinja2.Environment(loader=template_loader, trim_blocks=True,
-    lstrip_blocks=True)
+                                  lstrip_blocks=True)
 
 if __name__ == '__main__':
     # CMakeList single-source
@@ -44,7 +44,7 @@ class multi_source_bench(object):
         def __init__(self, path):
            self.name = os.path.basename(path)
            self.files = [x for x in os.listdir(path)
-               if x.endswith('.swift')]
+                         if x.endswith('.swift')]
     if os.path.isdir(multi_source_dir):
         multisource_benches = [
             multi_source_bench(os.path.join(multi_source_dir, x))
@@ -62,6 +62,7 @@ def get_run_funcs(filepath):
         content = open(filepath).read()
         matches = re.findall(r'func run_(.*?)\(', content)
         return filter(lambda x: x not in ignored_run_funcs, matches)
+
     def find_run_funcs(dirs):
         ret_run_funcs = []
         for d in dirs:
@@ -71,9 +72,9 @@ def find_run_funcs(dirs):
            ret_run_funcs.extend(run_funcs)
         return ret_run_funcs
     run_funcs = sorted(
-            [(x, x)
-                for x in find_run_funcs([single_source_dir, multi_source_dir])],
-            key=lambda x: x[0]
+        [(x, x)
+         for x in find_run_funcs([single_source_dir, multi_source_dir])],
+        key=lambda x: x[0]
     )
 
     # Replace originals with files generated from templates
@@ -87,4 +88,3 @@ def find_run_funcs(dirs):
                 imports=imports,
                 run_funcs=run_funcs)
             )
-
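
The edits in this file are whitespace-only: continuation lines are re-indented so wrapped clauses and arguments line up with the opening delimiter, and stray blank lines are adjusted. A minimal sketch of the same alignment pattern (the file list is invented):

# Illustrative only: the comprehension's "if" clause aligns under the element
# that follows the opening bracket.
files = ['DriverUtils.swift', 'CMakeLists.txt', 'main.swift']
swift_files = [x for x in files
               if x.endswith('.swift')]
print(swift_files)  # ['DriverUtils.swift', 'main.swift']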

benchmark/scripts/perf_test_driver/perf_test_driver.py (+1 -3)

@@ -13,7 +13,6 @@
 # ===----------------------------------------------------------------------===//
 
 import os
-import sys
 import subprocess
 import multiprocessing
 import re
@@ -85,7 +84,7 @@ def prepare_input_wrapper(name):
         results = None
         if self.enable_parallel:
             p = multiprocessing.Pool()
-            z = zip([self]*len(prepared_input), prepared_input)
+            z = zip([self] * len(prepared_input), prepared_input)
             results = p.map(_unwrap_self, z)
         else:
             results = map(self.process_input, prepared_input)
@@ -112,4 +111,3 @@ def run(self):
         has_failure = reduce(max, [d['has_failure']for d in self.data])
         self.print_data(self.data, max_test_len)
         return not has_failure
-
