Commit 183da81
[Python] Fix five classes of PEP-8 violations (E101/E111/E128/E302/W191)
* E101: indentation contains mixed spaces and tabs
* E111: indentation is not a multiple of four
* E128: continuation line under-indented for visual indent
* E302: expected 2 blank lines, found 1
* W191: indentation contains tabs
1 parent 5585687 commit 183da81

38 files changed (+1033, -701 lines)
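
For context, here is a minimal, hypothetical sketch (not taken from this diff) of the style these five checks enforce, loosely modeled on get_tests in Benchmark_Driver; list_benchmarks and the './Benchmark_O' path are illustrative only:

    import subprocess


    # E302: two blank lines before each top-level definition.
    def get_tests(driver_path):
        # W191/E101/E111: indentation uses spaces only, in multiples of four.
        # E128: the long call below uses a hanging indent rather than a
        # continuation line that falls short of the opening parenthesis.
        return subprocess.check_output(
            [driver_path, '--list'],
            stderr=subprocess.STDOUT).split()[2:]


    def list_benchmarks(driver_path='./Benchmark_O'):
        # Hypothetical helper; the driver path is only an example.
        for name in get_tests(driver_path):
            print name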

.pep8 (+1, -1)

@@ -1,3 +1,3 @@
 [flake8]
 filename = *.py,Benchmark_Driver,Benchmark_DTrace.in,Benchmark_GuardMalloc.in,Benchmark_RuntimeLeaksRunner.in,build-script,gyb,line-directive,ns-html2rst,recursive-lipo,rth,submit-benchmark-results,update-checkout,viewcfg
-ignore = D100,D101,D102,D103,D104,D105,E101,E111,E128,E302,E402,E501,W191
+ignore = D100,D101,D102,D103,D104,D105,E402,E501
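
With those five codes removed from the ignore list, flake8 now enforces them for every file matched by the filename patterns above. As a rough local check (an assumption about the workflow, not something this commit documents), the config can be passed to flake8 explicitly:

    flake8 --config=.pep8 benchmark/scripts/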

benchmark/scripts/Benchmark_DTrace.in (+8, -3)

@@ -27,6 +27,7 @@ import perf_test_driver
 XFAIL_LIST = [
 ]
 
+
 class DTraceResult(perf_test_driver.Result):
 
     def __init__(self, name, status, output, csv_output):
@@ -59,11 +60,14 @@ class DTraceResult(perf_test_driver.Result):
 
         print(DTraceResult.data_format(max_test_len).format(*result))
 
+
 class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
+
     def __init__(self, binary, xfail_list, csv_output):
-        perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
-            enable_parallel=False,
-            opt_levels=['O'])
+        perf_test_driver.BenchmarkDriver.__init__(
+            self, binary, xfail_list,
+            enable_parallel=False,
+            opt_levels=['O'])
         self.csv_output = csv_output
 
     def print_data_header(self, max_test_len):
@@ -94,6 +98,7 @@ class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
 
 SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))
 
+
 def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument('-filter', type=str, default=None,

benchmark/scripts/Benchmark_Driver (+93, -55)

@@ -27,6 +27,7 @@ import glob
 
 DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
 
+
 def parse_results(res, optset):
     # Parse lines like this
     # #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
@@ -58,10 +59,12 @@ def parse_results(res, optset):
         mem_test = {}
         mem_test['Data'] = [mem_testresult]
         mem_test['Info'] = {}
-        mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
+        mem_test['Name'] = "nts.swift/mem_maxrss." + \
+            optset + "." + testname + ".mem"
         tests.append(mem_test)
     return tests
 
+
 def submit_to_lnt(data, url):
     print "\nSubmitting results to LNT server..."
     json_report = {'input_data': json.dumps(data), 'commit': '1'}
@@ -75,6 +78,7 @@ def submit_to_lnt(data, url):
         print "Error:\t", response['error']
         sys.exit(1)
 
+
 def instrument_test(driver_path, test, num_samples):
     """Run a test and instrument its peak memory use"""
     test_outputs = []
@@ -113,14 +117,18 @@ def instrument_test(driver_path, test, num_samples):
 
     return avg_test_output
 
+
 def get_tests(driver_path):
     """Return a list of available performance tests"""
     return subprocess.check_output([driver_path, '--list']).split()[2:]
 
+
 def get_current_git_branch(git_repo_path):
     """Return the selected branch for the repo `git_repo_path`"""
-    return subprocess.check_output(['git', '-C', git_repo_path, 'rev-parse',
-        '--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip()
+    return subprocess.check_output(
+        ['git', '-C', git_repo_path, 'rev-parse',
+         '--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip()
+
 
 def log_results(log_directory, driver, formatted_output, swift_repo=None):
     """Log `formatted_output` to a branch specific directory in
@@ -146,6 +154,7 @@ def log_results(log_directory, driver, formatted_output, swift_repo=None):
     with open(log_file, 'w') as f:
         f.write(formatted_output)
 
+
 def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
                    log_directory=None, swift_repo=None):
     """Run perf tests individually and return results in a format that's
@@ -192,6 +201,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
         log_results(log_directory, driver, formatted_output, swift_repo)
     return formatted_output
 
+
 def submit(args):
     print "SVN revision:\t", args.revision
     print "Machine name:\t", args.machine
@@ -208,8 +218,9 @@ def submit(args):
         print "Opt level:\t", optset
         file = os.path.join(args.tests, "Benchmark_" + optset)
         try:
-            res = run_benchmarks(file, benchmarks=args.benchmark,
-                num_samples=args.iterations)
+            res = run_benchmarks(
+                file, benchmarks=args.benchmark,
+                num_samples=args.iterations)
             data['Tests'].extend(parse_results(res, optset))
         except subprocess.CalledProcessError as e:
             print "Execution failed.. Test results are empty."
@@ -227,24 +238,29 @@ def submit(args):
     submit_to_lnt(data, args.lnt_host)
     return 0
 
+
 def run(args):
     optset = args.optimization
     file = os.path.join(args.tests, "Benchmark_" + optset)
-    run_benchmarks(file, benchmarks=args.benchmarks,
-        num_samples=args.iterations, verbose=True,
-        log_directory=args.output_dir,
-        swift_repo=args.swift_repo)
+    run_benchmarks(
+        file, benchmarks=args.benchmarks,
+        num_samples=args.iterations, verbose=True,
+        log_directory=args.output_dir,
+        swift_repo=args.swift_repo)
     return 0
 
+
 def format_name(log_path):
     """Return the filename and directory for a log file"""
     return '/'.join(log_path.split('/')[-2:])
 
+
 def compare_logs(compare_script, new_log, old_log):
     """Return diff of log files at paths `new_log` and `old_log`"""
     print 'Comparing %s %s ...' % (format_name(old_log), format_name(new_log))
     subprocess.call([compare_script, old_log, new_log])
 
+
 def compare(args):
     log_dir = args.log_dir
     swift_repo = args.swift_repo
@@ -263,7 +279,8 @@ def compare(args):
     for branch_dir in [current_branch_dir, master_branch_dir]:
         for opt in ['O', 'Onone']:
             recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
-                glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
+                glob.glob(os.path.join(
+                    branch_dir, 'Benchmark_' + opt + '-*.log')),
                 key=os.path.getctime, reverse=True)
 
     if current_branch == 'master':
@@ -311,65 +328,86 @@ def compare(args):
 
     return 0
 
+
 def positive_int(value):
     ivalue = int(value)
     if not (ivalue > 0):
         raise ValueError
     return ivalue
 
+
 def main():
     parser = argparse.ArgumentParser(description='Swift benchmarks driver')
     subparsers = parser.add_subparsers()
 
-    submit_parser = subparsers.add_parser('submit',
-        help='run benchmarks and submit results to LNT')
-    submit_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' +
-        '(default: DRIVER_DIR)',
-        default=DRIVER_DIR)
-    submit_parser.add_argument('-m', '--machine', required=True,
-        help='LNT machine name')
-    submit_parser.add_argument('-r', '--revision', required=True,
-        help='SVN revision of compiler to identify the LNT run', type=int)
-    submit_parser.add_argument('-l', '--lnt_host', required=True,
-        help='LNT host to submit results to')
-    submit_parser.add_argument('-i', '--iterations',
-        help='number of times to run each test (default: 10)',
-        type=positive_int, default=10)
-    submit_parser.add_argument('-o', '--optimization', nargs='+',
-        help='optimization levels to use (default: O Onone Ounchecked)',
-        default=['O', 'Onone', 'Ounchecked'])
-    submit_parser.add_argument('benchmark',
-        help='benchmark to run (default: all)', nargs='*')
+    submit_parser = subparsers.add_parser(
+        'submit',
+        help='run benchmarks and submit results to LNT')
+    submit_parser.add_argument(
+        '-t', '--tests',
+        help='directory containing Benchmark_O{,none,unchecked} ' +
+             '(default: DRIVER_DIR)',
+        default=DRIVER_DIR)
+    submit_parser.add_argument(
+        '-m', '--machine', required=True,
+        help='LNT machine name')
+    submit_parser.add_argument(
+        '-r', '--revision', required=True,
+        help='SVN revision of compiler to identify the LNT run', type=int)
+    submit_parser.add_argument(
+        '-l', '--lnt_host', required=True,
+        help='LNT host to submit results to')
+    submit_parser.add_argument(
+        '-i', '--iterations',
+        help='number of times to run each test (default: 10)',
+        type=positive_int, default=10)
+    submit_parser.add_argument(
+        '-o', '--optimization', nargs='+',
+        help='optimization levels to use (default: O Onone Ounchecked)',
+        default=['O', 'Onone', 'Ounchecked'])
+    submit_parser.add_argument(
+        'benchmark',
+        help='benchmark to run (default: all)', nargs='*')
     submit_parser.set_defaults(func=submit)
 
-    run_parser = subparsers.add_parser('run',
-        help='run benchmarks and output results to stdout')
-    run_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' +
-        '(default: DRIVER_DIR)',
-        default=DRIVER_DIR)
-    run_parser.add_argument('-i', '--iterations',
-        help='number of times to run each test (default: 1)',
-        type=positive_int, default=1)
-    run_parser.add_argument('-o', '--optimization',
-        help='optimization level to use (default: O)', default='O')
-    run_parser.add_argument('--output-dir',
-        help='log results to directory (default: no logging)')
-    run_parser.add_argument('--swift-repo',
-        help='absolute path to Swift source repo for branch comparison')
-    run_parser.add_argument('benchmarks',
-        help='benchmark to run (default: all)', nargs='*')
+    run_parser = subparsers.add_parser(
+        'run',
+        help='run benchmarks and output results to stdout')
+    run_parser.add_argument(
+        '-t', '--tests',
+        help='directory containing Benchmark_O{,none,unchecked} ' +
+             '(default: DRIVER_DIR)',
+        default=DRIVER_DIR)
+    run_parser.add_argument(
+        '-i', '--iterations',
+        help='number of times to run each test (default: 1)',
+        type=positive_int, default=1)
+    run_parser.add_argument(
+        '-o', '--optimization',
+        help='optimization level to use (default: O)', default='O')
+    run_parser.add_argument(
+        '--output-dir',
+        help='log results to directory (default: no logging)')
+    run_parser.add_argument(
+        '--swift-repo',
+        help='absolute path to Swift source repo for branch comparison')
+    run_parser.add_argument(
+        'benchmarks',
+        help='benchmark to run (default: all)', nargs='*')
    run_parser.set_defaults(func=run)
 
-    compare_parser = subparsers.add_parser('compare',
-        help='compare benchmark results')
-    compare_parser.add_argument('--log-dir', required=True,
-        help='directory containing benchmark logs')
-    compare_parser.add_argument('--swift-repo', required=True,
-        help='absolute path to Swift source repo')
-    compare_parser.add_argument('--compare-script', required=True,
-        help='absolute path to compare script')
+    compare_parser = subparsers.add_parser(
+        'compare',
+        help='compare benchmark results')
+    compare_parser.add_argument(
+        '--log-dir', required=True,
+        help='directory containing benchmark logs')
+    compare_parser.add_argument(
+        '--swift-repo', required=True,
+        help='absolute path to Swift source repo')
+    compare_parser.add_argument(
+        '--compare-script', required=True,
+        help='absolute path to compare script')
     compare_parser.set_defaults(func=compare)
 
     args = parser.parse_args()

benchmark/scripts/Benchmark_GuardMalloc.in (+8, -3)

@@ -24,15 +24,19 @@ import perf_test_driver
 XFAIL_LIST = [
 ]
 
+
 class GuardMallocResult(perf_test_driver.Result):
 
     def __init__(self, name, status):
         perf_test_driver.Result.__init__(self, name, status, "", XFAIL_LIST)
 
+
 class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):
+
     def __init__(self, binary, xfail_list):
-        perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
-            enable_parallel=True)
+        perf_test_driver.BenchmarkDriver.__init__(
+            self, binary, xfail_list,
+            enable_parallel=True)
         self.new_env = os.environ.copy()
         self.new_env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib'
 
@@ -43,7 +47,8 @@ class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):
         test_name = '({},{})'.format(data['opt'], data['test_name'])
         print "Running {}...".format(test_name)
         sys.stdout.flush()
-        status = subprocess.call([data['path'], data['test_name'], '--num-iters=2'],
+        status = subprocess.call(
+            [data['path'], data['test_name'], '--num-iters=2'],
             env=data['env'], stderr=open('/dev/null', 'w'),
             stdout=open('/dev/null', 'w'))
         return GuardMallocResult(test_name, status)

benchmark/scripts/Benchmark_RuntimeLeaksRunner.in (+5, -2)

@@ -53,16 +53,19 @@ IGNORABLE_GLOBAL_OBJC_CLASSES = set([
     '_NSJSONReader'
 ])
 
+
 class LeaksRunnerResult(perf_test_driver.Result):
 
     def __init__(self, name, status):
         perf_test_driver.Result.__init__(self, name, status, "", XFAIL_LIST)
 
+
 class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
 
     def __init__(self, binary, xfail_list):
-        perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
-            enable_parallel=True)
+        perf_test_driver.BenchmarkDriver.__init__(
+            self, binary, xfail_list,
+            enable_parallel=True)
 
     def prepare_input(self, name):
         return {}

benchmark/scripts/compare_perf_tests.py (+6)

@@ -35,12 +35,14 @@
 ShowSpeedup = 1
 PrintAllScores = 0
 
+
 def parse_int(word):
     try:
         return int(word)
     except:
         raise Exception("Expected integer value, not " + word)
 
+
 def get_scores(fname):
     scores = {}
     worstscores = {}
@@ -77,9 +79,11 @@ def get_scores(fname):
     f.close()
     return scores, worstscores, runs, nums
 
+
 def is_max_score(newscore, maxscore, invert):
     return not maxscore or (newscore > maxscore if not invert else newscore < maxscore)
 
+
 def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
     print num.rjust(3),
     print key.ljust(25),
@@ -144,6 +148,7 @@ def compare_scores(key, score1, worstsample1, score2, worstsample2, runs, num):
         print "(?)",
     print
 
+
 def print_best_scores(key, scores):
     print key,
     bestscore = None
@@ -153,6 +158,7 @@ def print_best_scores(key, scores):
             bestscore = score
     print ", %d" % bestscore
 
+
 def usage():
     print "repeat.sh <n> Benchmark_O[none|unchecked] > file.times"
     print "compare_perf_tests.py <file.times> [<file2.times>]"
