[LNT] r317746 - Fix remaining pycodestyle/pep8 warnings, add lint script
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 8 16:27:12 PST 2017
Author: matze
Date: Wed Nov 8 16:27:12 2017
New Revision: 317746
URL: http://llvm.org/viewvc/llvm-project?rev=317746&view=rev
Log:
Fix remaining pycodestyle/pep8 warnings, add lint script
- Add `utils/lint.sh` that invokes pycodestyle, excluding external
packages, the docs config, and the tests (an equivalent check is
sketched below).
- Fix remaining warnings.
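For illustration, here is a minimal Python equivalent of the check that
`utils/lint.sh` performs (the script itself is not reproduced in this
diff; the paths and exclusions below are assumptions based on the
description above):

    import pycodestyle

    # Run pycodestyle over the tree, skipping external packages, the
    # docs config and the tests, as described above. Paths are
    # illustrative, not taken from the actual script.
    style = pycodestyle.StyleGuide(exclude=['lnt/external', 'docs/conf.py',
                                            'tests'])
    report = style.check_files(['lnt', 'setup.py'])
    if report.total_errors:
        raise SystemExit(1)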
Added:
lnt/trunk/utils/lint.sh (with props)
Modified:
lnt/trunk/deployment/app_wrapper.py
lnt/trunk/examples/functions.py
lnt/trunk/lnt/formats/JSONFormat.py
lnt/trunk/lnt/formats/PlistFormat.py
lnt/trunk/lnt/formats/__init__.py
lnt/trunk/lnt/lnttool/admin.py
lnt/trunk/lnt/lnttool/convert.py
lnt/trunk/lnt/lnttool/import_report.py
lnt/trunk/lnt/lnttool/main.py
lnt/trunk/lnt/server/db/migrations/upgrade_0_to_1.py
lnt/trunk/lnt/server/db/migrations/upgrade_6_to_7.py
lnt/trunk/lnt/server/db/regression.py
lnt/trunk/lnt/server/db/rules/rule_testhook.py
lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py
lnt/trunk/lnt/server/db/rules/rule_update_profile_stats.py
lnt/trunk/lnt/server/db/rules_manager.py
lnt/trunk/lnt/server/db/search.py
lnt/trunk/lnt/server/db/testsuitedb.py
lnt/trunk/lnt/server/db/v4db.py
lnt/trunk/lnt/server/ui/api.py
lnt/trunk/lnt/server/ui/app.py
lnt/trunk/lnt/server/ui/globals.py
lnt/trunk/lnt/server/ui/profile_views.py
lnt/trunk/lnt/server/ui/util.py
lnt/trunk/lnt/server/ui/views.py
lnt/trunk/lnt/testing/__init__.py
lnt/trunk/lnt/testing/profile/__init__.py
lnt/trunk/lnt/testing/profile/perf.py
lnt/trunk/lnt/testing/profile/profile.py
lnt/trunk/lnt/testing/profile/profilev1impl.py
lnt/trunk/lnt/testing/profile/profilev2impl.py
lnt/trunk/lnt/testing/util/compilers.py
lnt/trunk/lnt/tests/builtintest.py
lnt/trunk/lnt/tests/compile.py
lnt/trunk/lnt/tests/nt.py
lnt/trunk/lnt/tests/test_suite.py
lnt/trunk/lnt/util/ImportData.py
lnt/trunk/lnt/util/ServerUtil.py
lnt/trunk/lnt/util/async_ops.py
lnt/trunk/lnt/util/stats.py
lnt/trunk/lnt/util/wsgi_restart.py
lnt/trunk/setup.py
lnt/trunk/tests/lnttool/submit.shtest
Modified: lnt/trunk/deployment/app_wrapper.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/deployment/app_wrapper.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/deployment/app_wrapper.py (original)
+++ lnt/trunk/deployment/app_wrapper.py Wed Nov 8 16:27:12 2017
@@ -2,7 +2,7 @@
This can be used for deploying on the cloud.
"""
-
import lnt.server.ui.app
-app = lnt.server.ui.app.App.create_standalone('lnt.cfg', '/var/log/lnt/lnt.log')
+app = lnt.server.ui.app.App.create_standalone('lnt.cfg',
+ '/var/log/lnt/lnt.log')
Modified: lnt/trunk/examples/functions.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/examples/functions.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/examples/functions.py (original)
+++ lnt/trunk/examples/functions.py Wed Nov 8 16:27:12 2017
@@ -1,49 +1,49 @@
#!/usr/bin/env python
-
"""
-Simple example of a test generator which just produces data on some mathematical
-functions, keyed off of the current time.
+Simple example of a test generator which just produces data on some
+mathematical functions, keyed off of the current time.
"""
+from lnt.testing import Machine, Run, TestSamples, Report
+import math
+import random
+import sys
+import time
-import sys, time
-import math, random
-
-from lnt.testing import *
def main():
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] [output]")
- opts,args = parser.parse_args()
+ opts, args = parser.parse_args()
if len(args) == 0:
output = '-'
elif len(args) == 1:
- output, = args
+ output = args[0]
else:
parser.error("invalid number of arguments")
if output == '-':
output = sys.stdout
else:
- output = open(output,'w')
+ output = open(output, 'w')
offset = math.pi/5
delay = 120.
- machine = Machine('Mr. Sin Wave', info = { 'delay' : delay })
+ machine = Machine('Mr. Sin Wave', info={'delay': delay})
start = time.time()
- run = Run(start, start, info = { 't' : start,
- 'tag' : 'simple',
- 'run_order' : 1 })
+ run = Run(start, start, info={'t': start,
+ 'tag': 'simple',
+ 'run_order': 1})
tests = [TestSamples('simple.%s' % name,
- [fn(start*2*math.pi / delay + j * offset)],
- info = { 'offset' : j })
+ [fn(start*2*math.pi / delay + j * offset)],
+ info={'offset': j})
for j in range(5)
- for name,fn in (('sin',math.sin),
- ('cos',math.cos),
- ('random',lambda x: random.random()))]
+ for name, fn in (('sin', math.sin),
+ ('cos', math.cos),
+ ('random', lambda x: random.random()))]
report = Report(machine, run, tests)
@@ -52,5 +52,6 @@ def main():
if output is not sys.stderr:
output.close()
+
if __name__ == '__main__':
main()
Modified: lnt/trunk/lnt/formats/JSONFormat.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/formats/JSONFormat.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/formats/JSONFormat.py (original)
+++ lnt/trunk/lnt/formats/JSONFormat.py Wed Nov 8 16:27:12 2017
@@ -1,5 +1,6 @@
import json
+
def _matches_format(path_or_file):
if isinstance(path_or_file, str):
path_or_file = open(path_or_file)
@@ -7,16 +8,20 @@ def _matches_format(path_or_file):
try:
json.load(path_or_file)
return True
- except:
+ except Exception:
return False
+
def _load_format(path_or_file):
if isinstance(path_or_file, str):
path_or_file = open(path_or_file)
-
+
return json.load(path_or_file)
-
-format = { 'name' : 'json',
- 'predicate' : _matches_format,
- 'read' : _load_format,
- 'write' : json.dump }
+
+
+format = {
+ 'name': 'json',
+ 'predicate': _matches_format,
+ 'read': _load_format,
+ 'write': json.dump,
+}
Modified: lnt/trunk/lnt/formats/PlistFormat.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/formats/PlistFormat.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/formats/PlistFormat.py (original)
+++ lnt/trunk/lnt/formats/PlistFormat.py Wed Nov 8 16:27:12 2017
@@ -1,13 +1,17 @@
import plistlib
+
def _matches_format(path_or_file):
try:
plistlib.readPlist(path_or_file)
return True
- except:
+ except Exception:
return False
-format = { 'name' : 'plist',
- 'predicate' : _matches_format,
- 'read' : plistlib.readPlist,
- 'write' : plistlib.writePlist }
+
+format = {
+ 'name': 'plist',
+ 'predicate': _matches_format,
+ 'read': plistlib.readPlist,
+ 'write': plistlib.writePlist,
+}
Modified: lnt/trunk/lnt/formats/__init__.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/formats/__init__.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/formats/__init__.py (original)
+++ lnt/trunk/lnt/formats/__init__.py Wed Nov 8 16:27:12 2017
@@ -2,9 +2,9 @@
Utilities for converting to LNT's test format.
LNT formats are described by dictionaries with 'name', 'read', and 'write'
-fields. Only the 'name' field is required. The 'read' field should be a callable
-taking a path_or_file object, the 'write' function should be a callable taking a
-Python object to write, and the path_or_file to write to.
+fields. Only the 'name' field is required. The 'read' field should be a
+callable taking a path_or_file object, the 'write' function should be a
+callable taking a Python object to write, and the path_or_file to write to.
"""
from PlistFormat import format as plist
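As a hypothetical illustration of the contract described above (not
part of this commit), a minimal CSV format dict might look like:

    import csv

    def _read_csv(path_or_file):
        # 'read' takes a path or file object.
        if isinstance(path_or_file, str):
            path_or_file = open(path_or_file)
        return list(csv.reader(path_or_file))

    def _write_csv(obj, path_or_file):
        # 'write' takes the object to write and the path_or_file target.
        csv.writer(path_or_file).writerows(obj)

    format = {
        'name': 'csv',
        'predicate': lambda path_or_file: False,  # illustrative stub
        'read': _read_csv,
        'write': _write_csv,
    }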
@@ -14,6 +14,7 @@ formats = [plist, json]
formats_by_name = dict((f['name'], f) for f in formats)
format_names = formats_by_name.keys()
+
def get_format(name):
"""get_format(name) -> [format]
@@ -22,6 +23,7 @@ def get_format(name):
return formats_by_name.get(name)
+
def guess_format(path_or_file):
"""guess_format(path_or_file) -> [format]
@@ -42,7 +44,7 @@ def guess_format(path_or_file):
try:
if not f['predicate'](path_or_file):
continue
- except:
+ except Exception:
continue
finally:
if is_file:
@@ -57,6 +59,7 @@ def guess_format(path_or_file):
return matches
+
def read_any(path_or_file, format_name):
"""read_any(path_or_file, format_name) -> [format]
@@ -68,15 +71,16 @@ def read_any(path_or_file, format_name):
f = guess_format(path_or_file)
if f is None:
if isinstance(path_or_file, str):
- raise SystemExit("unable to guess input format for %r" % (
+ raise ValueError("unable to guess input format for %r" % (
path_or_file,))
else:
- raise SystemExit("unable to guess input format for file")
+ raise ValueError("unable to guess input format for file")
else:
f = get_format(format_name)
if f is None or not f.get('read'):
- raise SystemExit("unknown input format: %r" % format_name)
+ raise ValueError("unknown input format: %r" % format_name)
return f['read'](path_or_file)
+
__all__ = ['get_format', 'guess_format', 'read_any'] + format_names
Modified: lnt/trunk/lnt/lnttool/admin.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/admin.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/admin.py (original)
+++ lnt/trunk/lnt/lnttool/admin.py Wed Nov 8 16:27:12 2017
@@ -317,7 +317,7 @@ def action_post_run(config, datafiles, s
response_data = json.loads(response.text)
json.dump(response_data, sys.stderr, response_data, indent=2,
sort_keys=True)
- except:
+ except Exception:
sys.stderr.write(response.text)
sys.stderr.write('\n')
Modified: lnt/trunk/lnt/lnttool/convert.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/convert.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/convert.py (original)
+++ lnt/trunk/lnt/lnttool/convert.py Wed Nov 8 16:27:12 2017
@@ -33,7 +33,7 @@ def action_convert(input, output, input_
finally:
if output != sys.stdout:
output.close()
- except:
+ except Exception:
if output != sys.stdout:
os.remove(output)
raise
Modified: lnt/trunk/lnt/lnttool/import_report.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/import_report.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/import_report.py (original)
+++ lnt/trunk/lnt/lnttool/import_report.py Wed Nov 8 16:27:12 2017
@@ -43,7 +43,8 @@ def action_importreport(input, output, s
key, val = line.split()
metric = key.split(".")[1]
metric_type = float if metric not in ("hash", "profile") else str
- test = lnt.testing.TestSamples(suite + "." + key, [val], conv_f = metric_type)
+ test = lnt.testing.TestSamples(suite + "." + key, [val],
+ conv_f=metric_type)
report.tests.extend([test])
Modified: lnt/trunk/lnt/lnttool/main.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/main.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/main.py (original)
+++ lnt/trunk/lnt/lnttool/main.py Wed Nov 8 16:27:12 2017
@@ -188,8 +188,8 @@ def action_submit(url, files, select_mac
import lnt.util.ImportData
results = ServerUtil.submitFiles(url, files, verbose,
- select_machine=select_machine,
- merge_run=merge)
+ select_machine=select_machine,
+ merge_run=merge)
for submitted_file in results:
if verbose:
lnt.util.ImportData.print_report_result(
@@ -497,6 +497,8 @@ def main():
Use ``lnt <command> --help`` for more information on a specific command.
"""
_version_check()
+
+
main.add_command(action_check_no_errors)
main.add_command(action_checkformat)
main.add_command(action_convert)
Modified: lnt/trunk/lnt/server/db/migrations/upgrade_0_to_1.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/migrations/upgrade_0_to_1.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/migrations/upgrade_0_to_1.py (original)
+++ lnt/trunk/lnt/server/db/migrations/upgrade_0_to_1.py Wed Nov 8 16:27:12 2017
@@ -183,11 +183,13 @@ def get_base_for_testsuite(test_suite):
class Machine(UpdatedBase):
__tablename__ = db_key_name + '_Machine'
- __table_args__ = {'mysql_collate': 'utf8_bin'} # For case sensitive compare.
+ # For case sensitive compare.
+ __table_args__ = {'mysql_collate': 'utf8_bin'}
id = Column("ID", Integer, primary_key=True)
name = Column("Name", String(256), index=True)
- parameters_data = Column("Parameters", Binary, index=False, unique=False)
+ parameters_data = Column("Parameters", Binary, index=False,
+ unique=False)
class_dict = locals()
for item in test_suite.machine_fields:
@@ -230,7 +232,8 @@ def get_base_for_testsuite(test_suite):
end_time = Column("EndTime", DateTime)
simple_run_id = Column("SimpleRunID", Integer)
- parameters_data = Column("Parameters", Binary, index=False, unique=False)
+ parameters_data = Column("Parameters", Binary, index=False,
+ unique=False)
machine = sqlalchemy.orm.relation(Machine)
order = sqlalchemy.orm.relation(Order)
Modified: lnt/trunk/lnt/server/db/migrations/upgrade_6_to_7.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/migrations/upgrade_6_to_7.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/migrations/upgrade_6_to_7.py (original)
+++ lnt/trunk/lnt/server/db/migrations/upgrade_6_to_7.py Wed Nov 8 16:27:12 2017
@@ -8,7 +8,8 @@ import sqlalchemy
#
# Import the original schema from upgrade_0_to_1 since upgrade_6_to_7 does not
# change the actual schema.
-from lnt.server.db.migrations.upgrade_0_to_1 import SampleType, TestSuite, SampleField
+from lnt.server.db.migrations.upgrade_0_to_1 import SampleType, TestSuite, \
+ SampleField
from lnt.server.db.util import add_column
Modified: lnt/trunk/lnt/server/db/regression.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/regression.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/regression.py (original)
+++ lnt/trunk/lnt/server/db/regression.py Wed Nov 8 16:27:12 2017
@@ -22,14 +22,16 @@ class RegressionState:
FIXED = 22
# System detected it is fixed.
DETECTED_FIXED = 23
- names = {DETECTED: u'Detected',
- STAGED: u'Staged',
- ACTIVE: u'Active',
- NTBF: u'Not to be Fixed',
- IGNORED: u'Ignored',
- DETECTED_FIXED: u'Verify',
- FIXED: u'Fixed'
- }
+ names = {
+ DETECTED: u'Detected',
+ STAGED: u'Staged',
+ ACTIVE: u'Active',
+ NTBF: u'Not to be Fixed',
+ IGNORED: u'Ignored',
+ DETECTED_FIXED: u'Verify',
+ FIXED: u'Fixed',
+ }
+
ChangeRuns = namedtuple("ChangeRuns", ["before", "after"])
ChangeData = namedtuple("ChangeData", ["ri", "cr", "run", "latest_cr"])
Modified: lnt/trunk/lnt/server/db/rules/rule_testhook.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/rules/rule_testhook.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/rules/rule_testhook.py (original)
+++ lnt/trunk/lnt/server/db/rules/rule_testhook.py Wed Nov 8 16:27:12 2017
@@ -6,4 +6,5 @@ Test rule system. A simple rules, with o
def test():
return "Foo."
+
post_test_hook = test
Modified: lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py (original)
+++ lnt/trunk/lnt/server/db/rules/rule_update_fixed_regressions.py Wed Nov 8 16:27:12 2017
@@ -72,4 +72,5 @@ def regression_evolution(session, ts, ru
session.commit()
logger.info("Changed the state of {} regressions".format(changed))
+
post_submission_hook = regression_evolution
Modified: lnt/trunk/lnt/server/db/rules/rule_update_profile_stats.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/rules/rule_update_profile_stats.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/rules/rule_update_profile_stats.py (original)
+++ lnt/trunk/lnt/server/db/rules/rule_update_profile_stats.py Wed Nov 8 16:27:12 2017
@@ -22,7 +22,7 @@ def update_profile_stats(session, ts, ru
try:
history = json.loads(open(history_path).read())
- except:
+ except Exception:
history = []
age = []
@@ -41,4 +41,5 @@ def update_profile_stats(session, ts, ru
open(history_path, 'w').write(json.dumps(history))
open(age_path, 'w').write(json.dumps(age))
+
post_submission_hook = update_profile_stats
Modified: lnt/trunk/lnt/server/db/rules_manager.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/rules_manager.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/rules_manager.py (original)
+++ lnt/trunk/lnt/server/db/rules_manager.py Wed Nov 8 16:27:12 2017
@@ -44,6 +44,7 @@ def load_rules():
return rule_scripts
+
# Places our rules can hook to.
HOOKS = {
'post_test_hook': [],
Modified: lnt/trunk/lnt/server/db/search.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/search.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/search.py (original)
+++ lnt/trunk/lnt/server/db/search.py Wed Nov 8 16:27:12 2017
@@ -51,9 +51,9 @@ def _naive_search_for_run(session, ts, q
ts.Order.fields[llvm_project_revision_idx].column
q = session.query(ts.Run) \
- .filter(ts.Run.machine_id.in_(machines)) \
- .filter(ts.Run.order_id == ts.Order.id) \
- .filter(llvm_project_revision_col.isnot(None))
+ .filter(ts.Run.machine_id.in_(machines)) \
+ .filter(ts.Run.order_id == ts.Order.id) \
+ .filter(llvm_project_revision_col.isnot(None))
if order_queries:
oq = '%' + str(order_queries[0]) + '%'
q = q.filter(llvm_project_revision_col.like(oq))
Modified: lnt/trunk/lnt/server/db/testsuitedb.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/testsuitedb.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/testsuitedb.py (original)
+++ lnt/trunk/lnt/server/db/testsuitedb.py Wed Nov 8 16:27:12 2017
@@ -343,7 +343,8 @@ class TestSuiteDB(object):
# The parameters blob is used to store any additional information
# reported by the run but not promoted into the machine record.
# Such data is stored as a JSON encoded blob.
- parameters_data = Column("Parameters", Binary, index=False, unique=False)
+ parameters_data = Column("Parameters", Binary, index=False,
+ unique=False)
machine = relation(Machine)
order = relation(Order)
@@ -410,7 +411,8 @@ class TestSuiteDB(object):
class Test(self.base, ParameterizedMixin):
__tablename__ = db_key_name + '_Test'
- __table_args__ = {'mysql_collate': 'utf8_bin'} # For case sensitive compare.
+ # utf8_bin for case sensitive compare
+ __table_args__ = {'mysql_collate': 'utf8_bin'}
id = Column("ID", Integer, primary_key=True)
name = Column("Name", String(256), unique=True, index=True)
@@ -972,7 +974,9 @@ class TestSuiteDB(object):
order.name)
elif merge == 'replace':
for previous_run in existing_runs:
- logger.info("Duplicate submission for order %r: deleting previous run %r" % (order, previous_run))
+ logger.info("Duplicate submission for order %r: "
+ "deleting previous run %r" %
+ (order, previous_run))
session.delete(previous_run)
else:
raise ValueError('Invalid Run mergeStrategy %r' % merge)
Modified: lnt/trunk/lnt/server/db/v4db.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/v4db.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/v4db.py (original)
+++ lnt/trunk/lnt/server/db/v4db.py Wed Nov 8 16:27:12 2017
@@ -4,7 +4,7 @@ import sys
try:
import threading
-except:
+except Exception:
import dummy_threading as threading
import sqlalchemy
Modified: lnt/trunk/lnt/server/ui/api.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/api.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/api.py (original)
+++ lnt/trunk/lnt/server/ui/api.py Wed Nov 8 16:27:12 2017
@@ -397,7 +397,7 @@ class Graph(Resource):
abort(404)
q = session.query(field.column, ts.Order.llvm_project_revision,
- ts.Run.start_time, ts.Run.id) \
+ ts.Run.start_time, ts.Run.id) \
.join(ts.Run) \
.join(ts.Order) \
.filter(ts.Run.machine_id == machine.id) \
Modified: lnt/trunk/lnt/server/ui/app.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/app.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/app.py (original)
+++ lnt/trunk/lnt/server/ui/app.py Wed Nov 8 16:27:12 2017
@@ -272,6 +272,7 @@ class App(LNTExceptionLoggerFlask):
else:
self.config['log_file_name'] = log_file_name
+
def create_jinja_environment(env=None):
"""
create_jinja_environment([env]) -> jinja2.Environment
Modified: lnt/trunk/lnt/server/ui/globals.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/globals.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/globals.py (original)
+++ lnt/trunk/lnt/server/ui/globals.py Wed Nov 8 16:27:12 2017
@@ -31,7 +31,7 @@ def v4_url_available(*args, **kwargs):
try:
flask.g.testsuite_name
return True
- except:
+ except Exception:
return False
Modified: lnt/trunk/lnt/server/ui/profile_views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/profile_views.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/profile_views.py (original)
+++ lnt/trunk/lnt/server/ui/profile_views.py Wed Nov 8 16:27:12 2017
@@ -30,11 +30,11 @@ def profile_admin():
try:
history = json.loads(open(history_path).read())
- except:
+ except Exception:
history = []
try:
age = json.loads(open(age_path).read())
- except:
+ except Exception:
age = []
# Convert from UNIX timestamps to Javascript timestamps.
@@ -74,8 +74,8 @@ def v4_profile_ajax_getFunctions():
idx = 0
tlc = {}
sample = session.query(ts.Sample) \
- .filter(ts.Sample.run_id == runid) \
- .filter(ts.Sample.test_id == testid).first()
+ .filter(ts.Sample.run_id == runid) \
+ .filter(ts.Sample.test_id == testid).first()
if sample and sample.profile:
p = sample.profile.load(profileDir)
return json.dumps([[n, f] for n, f in p.getFunctions().items()])
@@ -122,8 +122,8 @@ def v4_profile_ajax_getCodeForFunction()
profileDir = current_app.old_config.profileDir
sample = session.query(ts.Sample) \
- .filter(ts.Sample.run_id == runid) \
- .filter(ts.Sample.test_id == testid).first()
+ .filter(ts.Sample.run_id == runid) \
+ .filter(ts.Sample.test_id == testid).first()
if not sample or not sample.profile:
abort(404)
@@ -150,13 +150,13 @@ def v4_profile(testid, run1_id, run2_id=
test = session.query(ts.Test).filter(ts.Test.id == testid).one()
run1 = session.query(ts.Run).filter(ts.Run.id == run1_id).one()
sample1 = session.query(ts.Sample) \
- .filter(ts.Sample.run_id == run1_id) \
- .filter(ts.Sample.test_id == testid).first()
+ .filter(ts.Sample.run_id == run1_id) \
+ .filter(ts.Sample.test_id == testid).first()
if run2_id is not None:
run2 = session.query(ts.Run).filter(ts.Run.id == run2_id).one()
sample2 = session.query(ts.Sample) \
- .filter(ts.Sample.run_id == run2_id) \
- .filter(ts.Sample.test_id == testid).first()
+ .filter(ts.Sample.run_id == run2_id) \
+ .filter(ts.Sample.test_id == testid).first()
else:
run2 = None
sample2 = None
Modified: lnt/trunk/lnt/server/ui/util.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/util.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/util.py (original)
+++ lnt/trunk/lnt/server/ui/util.py Wed Nov 8 16:27:12 2017
@@ -69,16 +69,16 @@ def all_false(list, predicate):
return not any_true(list, predicate)
-def mean(l):
- return sum(l) / len(l)
+def mean(values):
+ return sum(values) / len(values)
-def median(l):
- l = list(l)
- l.sort()
- N = len(l)
- return (l[(N - 1) // 2] +
- l[(N + 0) // 2]) * .5
+def median(values):
+ values = list(values)
+ values.sort()
+ N = len(values)
+ return (values[(N - 1) // 2] +
+ values[(N + 0) // 2]) * .5
def prependLines(prependStr, str):
@@ -207,10 +207,10 @@ class PctCell:
return '<td %s>%s</td>' % (attr_string, self.getValue())
-def sorted(l, *args, **kwargs):
- l = list(l)
- l.sort(*args, **kwargs)
- return l
+def sorted(values, *args, **kwargs):
+ values = list(values)
+ values.sort(*args, **kwargs)
+ return values
def renderProducerAsHTML(producer):
Modified: lnt/trunk/lnt/server/ui/views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/views.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/views.py (original)
+++ lnt/trunk/lnt/server/ui/views.py Wed Nov 8 16:27:12 2017
@@ -138,7 +138,7 @@ def _do_submit():
Info = Run.get('Info')
if Info is not None:
g.testsuite_name = Info.get('tag')
- except:
+ except Exception:
pass
if g.testsuite_name is None:
g.testsuite_name = 'nts'
@@ -390,7 +390,7 @@ class V4RequestInfo(object):
try:
self.num_comparison_runs = int(
request.args.get('num_comparison_runs'))
- except:
+ except Exception:
self.num_comparison_runs = 0
# Find the baseline run, if requested.
@@ -696,6 +696,7 @@ def v4_run_graph(id):
return redirect(v4_url_for(".v4_graph", **args))
+
BaselineLegendItem = namedtuple('BaselineLegendItem', 'name id')
LegendItem = namedtuple('LegendItem', 'machine test_name field_name color url')
@@ -704,9 +705,10 @@ LegendItem = namedtuple('LegendItem', 'm
def v4_graph_for_sample(sample_id, field_name):
"""Redirect to a graph of the data that a sample and field came from.
- When you have a sample from an API call, this can get you into the LNT graph
- page, for that sample. Extra args are passed through, to allow the caller
- to customize the graph page displayed, with for example run highlighting.
+ When you have a sample from an API call, this can get you into the LNT
+ graph page, for that sample. Extra args are passed through, to allow the
+ caller to customize the graph page displayed, with for example run
+ highlighting.
:param sample_id: the sample ID from the database, obtained from the API.
:param field_name: the name of the field.
@@ -807,17 +809,16 @@ def v4_graph():
machine_id = int(machine_id_str)
test_id = int(test_id_str)
field_index = int(field_index_str)
- except:
+ except Exception:
return abort(400)
if not (0 <= field_index < len(ts.sample_fields)):
return abort(404)
try:
- machine = \
- session.query(ts.Machine) \
- .filter(ts.Machine.id == machine_id) \
- .one()
+ machine = session.query(ts.Machine) \
+ .filter(ts.Machine.id == machine_id) \
+ .one()
test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
field = ts.sample_fields[field_index]
except NoResultFound:
@@ -874,7 +875,7 @@ def v4_graph():
run_id_str = value
try:
run_id = int(run_id_str)
- except:
+ except Exception:
return abort(400)
try:
@@ -882,7 +883,7 @@ def v4_graph():
.options(joinedload(ts.Run.machine)) \
.filter(ts.Run.id == run_id) \
.one()
- except:
+ except Exception:
err_msg = ("The run {} was not found in the database."
.format(run_id))
return render_template("error.html",
@@ -932,7 +933,7 @@ def v4_graph():
#
# FIXME: Don't hard code field name.
q = session.query(field.column, ts.Order.llvm_project_revision,
- ts.Run.start_time, ts.Run.id) \
+ ts.Run.start_time, ts.Run.id) \
.join(ts.Run).join(ts.Order) \
.filter(ts.Run.machine_id == machine.id) \
.filter(ts.Sample.test == test) \
@@ -1000,10 +1001,10 @@ def v4_graph():
q = session.query(sqlalchemy.sql.func.min(field.column),
ts.Order.llvm_project_revision,
sqlalchemy.sql.func.min(ts.Run.start_time)) \
- .join(ts.Run).join(ts.Order).join(ts.Test) \
- .filter(ts.Run.machine_id == machine.id) \
- .filter(field.column.isnot(None)) \
- .group_by(ts.Order.llvm_project_revision, ts.Test)
+ .join(ts.Run).join(ts.Order).join(ts.Test) \
+ .filter(ts.Run.machine_id == machine.id) \
+ .filter(field.column.isnot(None)) \
+ .group_by(ts.Order.llvm_project_revision, ts.Test)
# Calculate geomean of each revision.
data = multidict.multidict(
@@ -1575,17 +1576,18 @@ def v4_search():
try:
int(i)
return True
- except:
+ except Exception:
return False
session = request.session
ts = request.get_testsuite()
query = request.args.get('q')
- l = request.args.get('l', 8)
+ l_arg = request.args.get('l', 8)
default_machine = request.args.get('m', None)
assert query
- results = lnt.server.db.search.search(session, ts, query, num_results=l,
+ results = lnt.server.db.search.search(session, ts, query,
+ num_results=l_arg,
default_machine=default_machine)
return json.dumps(
@@ -1822,9 +1824,9 @@ def v4_matrix():
False)
# Calculate the date of each order.
runs = session.query(ts.Run.start_time, ts.Order.llvm_project_revision) \
- .join(ts.Order) \
- .filter(ts.Order.llvm_project_revision.in_(all_orders)) \
- .all()
+ .join(ts.Order) \
+ .filter(ts.Order.llvm_project_revision.in_(all_orders)) \
+ .all()
order_to_date = dict([(x[1], x[0]) for x in runs])
Modified: lnt/trunk/lnt/testing/__init__.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/__init__.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/__init__.py (original)
+++ lnt/trunk/lnt/testing/__init__.py Wed Nov 8 16:27:12 2017
@@ -267,6 +267,7 @@ class _UpgradeSchema(object):
self.machine_param_rename = machine_param_rename
self.run_param_rename = run_param_rename
+
_nts_upgrade = _UpgradeSchema(
metric_rename={
'.code_size': 'code_size',
@@ -432,4 +433,5 @@ def upgrade_report(data, ts_name):
assert(format_version == 2)
return data
+
__all__ = ['Report', 'Machine', 'Run', 'TestSamples']
Modified: lnt/trunk/lnt/testing/profile/__init__.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/profile/__init__.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/profile/__init__.py (original)
+++ lnt/trunk/lnt/testing/profile/__init__.py Wed Nov 8 16:27:12 2017
@@ -1,4 +1,5 @@
-# This is the profile implementation registry. Register new profile implementations here.
+# This is the profile implementation registry. Register new profile
+# implementations here.
from profilev1impl import ProfileV1
from profilev2impl import ProfileV2
Modified: lnt/trunk/lnt/testing/profile/perf.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/profile/perf.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/profile/perf.py (original)
+++ lnt/trunk/lnt/testing/profile/perf.py Wed Nov 8 16:27:12 2017
@@ -1,13 +1,16 @@
-import json, os, traceback
+from lnt.util import logger
from profile import ProfileImpl
from profilev1impl import ProfileV1
-from lnt.util import logger
+import json
+import os
+import traceback
try:
import cPerf
-except:
+except Exception:
pass
+
class LinuxPerfProfile(ProfileImpl):
def __init__(self):
pass
@@ -15,11 +18,11 @@ class LinuxPerfProfile(ProfileImpl):
@staticmethod
def checkFile(fn):
return open(fn).read(8) == 'PERFILE2'
-
+
@staticmethod
def deserialize(f, nm='nm', objdump='objdump', propagateExceptions=False):
f = f.name
-
+
if os.path.getsize(f) == 0:
# Empty file - exit early.
return None
@@ -31,14 +34,14 @@ class LinuxPerfProfile(ProfileImpl):
for f in data['functions'].values():
fc = f['counters']
for l in f['data']:
- for k,v in l[0].items():
+ for k, v in l[0].items():
l[0][k] = 100.0 * float(v) / fc[k]
- for k,v in fc.items():
+ for k, v in fc.items():
fc[k] = 100.0 * v / data['counters'][k]
return ProfileV1(data)
- except:
+ except Exception:
if propagateExceptions:
raise
logger.warning(traceback.format_exc())
Modified: lnt/trunk/lnt/testing/profile/profile.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/profile/profile.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/profile/profile.py (original)
+++ lnt/trunk/lnt/testing/profile/profile.py Wed Nov 8 16:27:12 2017
@@ -1,5 +1,8 @@
-import os, tempfile, base64
+import base64
import lnt.testing.profile
+import os
+import tempfile
+
class Profile(object):
"""Profile objects hold a performance profile.
@@ -10,7 +13,7 @@ class Profile(object):
"""
# Import this late to avoid a cyclic dependency
import lnt.testing.profile
-
+
def __init__(self, impl):
"""Internal constructor. Users should not call this; use fromFile or
fromRendered."""
@@ -44,7 +47,7 @@ class Profile(object):
# Rewind to beginning.
fd.flush()
fd.seek(0)
-
+
for impl in lnt.testing.profile.IMPLEMENTATIONS.values():
if impl.checkFile(fd.name):
ret = impl.deserialize(fd)
@@ -79,7 +82,7 @@ class Profile(object):
else:
open(filename, 'w').write(s)
return filename
-
+
def save(self, filename=None, profileDir=None, prefix=''):
"""
Save a profile. One of 'filename' or 'profileDir' must be given.
@@ -125,7 +128,8 @@ class Profile(object):
new_version = version + 1
if new_version not in lnt.testing.profile.IMPLEMENTATIONS:
return self
- self.impl = lnt.testing.profile.IMPLEMENTATIONS[new_version].upgrade(self.impl)
+ new_impl = lnt.testing.profile.IMPLEMENTATIONS[new_version]
+ self.impl = new_impl.upgrade(self.impl)
#
# ProfileImpl facade - see ProfileImpl documentation below.
@@ -133,49 +137,51 @@ class Profile(object):
def getVersion(self):
return self.impl.getVersion()
-
+
def getTopLevelCounters(self):
return self.impl.getTopLevelCounters()
def getDisassemblyFormat(self):
return self.impl.getDisassemblyFormat()
-
+
def getFunctions(self):
return self.impl.getFunctions()
def getCodeForFunction(self, fname):
return self.impl.getCodeForFunction(fname)
-################################################################################
class ProfileImpl(object):
@staticmethod
def upgrade(old):
"""
- Takes a previous profile implementation in 'old' and returns a new ProfileImpl
- for this version. The only old version that must be supported is the immediately
- prior version (e.g. version 3 only has to handle upgrades from version 2.
+ Takes a previous profile implementation in 'old' and returns a new
+ ProfileImpl for this version. The only old version that must be
+ supported is the immediately prior version (e.g. version 3 only has to
+ handle upgrades from version 2.
"""
raise NotImplementedError("Abstract class")
@staticmethod
def checkFile(fname):
"""
- Return True if 'fname' is a serialized version of this profile implementation.
+ Return True if 'fname' is a serialized version of this profile
+ implementation.
"""
raise NotImplementedError("Abstract class")
-
+
@staticmethod
def deserialize(fobj):
"""
- Reads a profile from 'fobj', returning a new profile object. This can be lazy.
+ Reads a profile from 'fobj', returning a new profile object. This can
+ be lazy.
"""
raise NotImplementedError("Abstract class")
def serialize(self, fname=None):
"""
- Serializes the profile to the given filename (base). If fname is None, returns
- as a bytes instance.
+ Serializes the profile to the given filename (base). If fname is None,
+ returns as a bytes instance.
"""
raise NotImplementedError("Abstract class")
@@ -187,48 +193,56 @@ class ProfileImpl(object):
def getTopLevelCounters(self):
"""
- Return a dict containing the counters for the entire profile. These will
- be absolute numbers: ``{'cycles': 5000.0}`` for example.
+ Return a dict containing the counters for the entire profile. These
+ will be absolute numbers: ``{'cycles': 5000.0}`` for example.
"""
raise NotImplementedError("Abstract class")
def getDisassemblyFormat(self):
"""
- Return the format for the disassembly strings returned by getCodeForFunction().
- Possible values are:
+ Return the format for the disassembly strings returned by
+ getCodeForFunction(). Possible values are:
- * ``raw`` - No interpretation available - pure strings.
+ * ``raw`` - No interpretation available;
+ pure strings.
* ``marked-up-disassembly`` - LLVM marked up disassembly format.
"""
raise NotImplementedError("Abstract class")
-
+
def getFunctions(self):
"""
- Return a dict containing function names to information about that function.
+ Return a dict containing function names to information about that
+ function.
The information dict contains:
* ``counters`` - counter values for the function.
- * ``length`` - number of times to call getCodeForFunction to obtain all instructions.
+ * ``length`` - number of times to call getCodeForFunction to obtain all
+ instructions.
The dict should *not* contain disassembly / function contents.
The counter values must be percentages, not absolute numbers.
E.g.::
- {'main': {'counters': {'cycles': 50.0, 'branch-misses': 0}, 'length': 200},
- 'dotest': {'counters': {'cycles': 50.0, 'branch-misses': 0}, 'length': 4}}
+ {'main': {'counters': {'cycles': 50.0, 'branch-misses': 0},
+ 'length': 200},
+ 'dotest': {'counters': {'cycles': 50.0, 'branch-misses': 0},
+ 'length': 4}
+ }
"""
raise NotImplementedError("Abstract class")
def getCodeForFunction(self, fname):
"""
- Return a *generator* which will return, for every invocation, a three-tuple::
+ Return a *generator* which will return, for every invocation, a
+ three-tuple::
(counters, address, text)
Where counters is a dict : (e.g.) ``{'cycles': 50.0}``, text is in the
- format as returned by getDisassemblyFormat(), and address is an integer.
+ format as returned by getDisassemblyFormat(), and address is an
+ integer.
The counter values must be percentages (of the function total), not
absolute numbers.
Modified: lnt/trunk/lnt/testing/profile/profilev1impl.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/profile/profilev1impl.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/profile/profilev1impl.py (original)
+++ lnt/trunk/lnt/testing/profile/profilev1impl.py Wed Nov 8 16:27:12 2017
@@ -1,5 +1,7 @@
from lnt.testing.profile.profile import ProfileImpl
-import cPickle, zlib
+import cPickle
+import zlib
+
class ProfileV1(ProfileImpl):
"""
@@ -7,12 +9,13 @@ ProfileV1 files not clever in any way. T
the profile data layed out in the most obvious way for production/consumption
that are then pickled and compressed.
-They are expected to be created by simply storing into the ``self.data`` member.
+They are expected to be created by simply storing into the ``self.data``
+member.
The ``self.data`` member has this format::
{
- counters: {'cycles': 12345.0, 'branch-misses': 200.0}, # Counter values are absolute.
- counters: {'cycles': 12345.0, 'branch-misses': 200.0}, # Counter values are absolute.
+ counters: {'cycles': 12345.0, 'branch-misses': 200.0},  # absolute values.
disassembly-format: 'raw',
functions: {
name: {
@@ -28,10 +31,11 @@ The ``self.data`` member has this format
def __init__(self, data):
"""
- Create from a raw data dict. data has the format given in the class docstring.
+ Create from a raw data dict. data has the format given in the class
+ docstring.
"""
self.data = data
-
+
@staticmethod
def upgrade(old):
raise RuntimeError("Cannot upgrade to version 1!")
@@ -67,7 +71,7 @@ The ``self.data`` member has this format
if 'disassembly-format' in self.data:
return self.data['disassembly-format']
return 'raw'
-
+
def getFunctions(self):
d = {}
for fn in self.data['functions']:
Modified: lnt/trunk/lnt/testing/profile/profilev2impl.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/profile/profilev2impl.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/profile/profilev2impl.py (original)
+++ lnt/trunk/lnt/testing/profile/profilev2impl.py Wed Nov 8 16:27:12 2017
@@ -1,5 +1,10 @@
-import struct, bz2, os, StringIO, copy, io
from profile import ProfileImpl
+import StringIO
+import bz2
+import copy
+import io
+import os
+import struct
"""
ProfileV2 is a profile data representation designed to keep the
@@ -28,13 +33,14 @@ The sections are:
Functions
For each function, contains the counters for that function, the number
- of instructions and indices into the LineAddresses, LineCounters and LineText
- sections.
+ of instructions and indices into the LineAddresses, LineCounters and
+ LineText sections.
LineAddresses
- A flat list of numbers, addresses are encoded as offsets from the previous
- address. Each Function knows its starting index into this list (the first
- address is an offset from zero) and how many addresses to read.
+ A flat list of numbers, addresses are encoded as offsets from the
+ previous address. Each Function knows its starting index into this list
+ (the first address is an offset from zero) and how many addresses to
+ read.
LineCounters
A list of floating point values, one for each counter in the counter name
@@ -43,37 +49,40 @@ The sections are:
Like LineAddresses, Functions know their own index into the LineCounter
table.
- The numbers are floating point numbers that are bitconverted into integers.
+ The numbers are floating point numbers that are bitconverted into
+ integers.
LineText
- A list of offsets into the TextPool, which is a simple string pool. Again,
- Functions know their index into the LineText section.
+ A list of offsets into the TextPool, which is a simple string pool.
+ Again, Functions know their index into the LineText section.
TextPool
A simple string pool.
- The LineAddresses and LineCounters sections are designed to hold very repetitive
- data that is very easy to compress. The LineText section allows repeated strings
- to be reused (for example 'add r0, r0, r0').
-
- The LineAddresses, LineCounters, LineText and TextPool sections are BZ2 compressed.
-
- The TextPool section has the ability to be shared across multiple profiles to
- take advantage of inter-run redundancy (the image very rarely changes substantially).
- This pooling ability is not yet implemented but the appropriate scaffolding is in
- place.
-
- The ProfileV2 format gives a ~3x size improvement over the ProfileV1 (which is also
- compressed) - meaning a ProfileV2 is roughly 1/3 the size of ProfileV1. With text
- pooling, ProfileV2s can be even smaller.
+ The LineAddresses and LineCounters sections are designed to hold very
+ repetitive data that is very easy to compress. The LineText section allows
+ repeated strings to be reused (for example 'add r0, r0, r0').
+
+ The LineAddresses, LineCounters, LineText and TextPool sections are BZ2
+ compressed.
+
+ The TextPool section has the ability to be shared across multiple profiles
+ to take advantage of inter-run redundancy (the image very rarely changes
+ substantially). This pooling ability is not yet implemented but the
+ appropriate scaffolding is in place.
+
+ The ProfileV2 format gives a ~3x size improvement over the ProfileV1 (which
+ is also compressed) - meaning a ProfileV2 is roughly 1/3 the size of
+ ProfileV1. With text pooling, ProfileV2s can be even smaller.
- Not only this, but for the simple task of enumerating the functions in a profile
- we do not need to do any decompression at all.
+ Not only this, but for the simple task of enumerating the functions in a
+ profile we do not need to do any decompression at all.
"""
-################################################################################
+##############################################################################
# Utility functions
+
def readNum(fobj):
"""
Reads a ULEB encoded number from a stream.
@@ -87,11 +96,11 @@ def readNum(fobj):
if (b & 0x80) == 0:
return n
+
def writeNum(fobj, n):
"""
Write 'n' as a ULEB encoded number to a stream.
"""
- l = []
while True:
b = n & 0x7F
n >>= 7
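As context for readNum/writeNum above, a self-contained ULEB128
round-trip in the same Python 2 style as this codebase (a sketch for
illustration, not part of this commit):

    import StringIO

    def uleb_write(fobj, n):
        # Emit 7 bits per byte, least-significant group first; bit 7 is
        # set while more groups remain.
        while True:
            b = n & 0x7F
            n >>= 7
            if n:
                b |= 0x80
            fobj.write(chr(b))
            if not n:
                break

    def uleb_read(fobj):
        # Accumulate 7-bit groups until a byte with bit 7 clear.
        n, shift = 0, 0
        while True:
            b = ord(fobj.read(1))
            n |= (b & 0x7F) << shift
            shift += 7
            if not (b & 0x80):
                return n

    buf = StringIO.StringIO()
    uleb_write(buf, 624485)  # encodes as 0xE5 0x8E 0x26
    buf.seek(0)
    assert uleb_read(buf) == 624485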
@@ -102,12 +111,14 @@ def writeNum(fobj, n):
if n == 0:
break
+
def readString(fobj):
"""
Read a string from a stream.
"""
return fobj.readline()[:-1]
-
+
+
def writeString(fobj, s):
"""
Write a string to a stream.
@@ -115,15 +126,17 @@ def writeString(fobj, s):
fobj.write(str(s))
fobj.write('\n')
+
def readFloat(fobj):
"""
Read a floating point number from a stream.
"""
- l = readNum(fobj)
- packed = struct.pack('>l', l)
+ num = readNum(fobj)
+ packed = struct.pack('>l', num)
f = struct.unpack('>f', packed)[0]
return f
+
def writeFloat(fobj, f):
"""
Write a floating point number to a stream.
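readFloat/writeFloat store a float by bit-casting it to a 32-bit
integer with struct and ULEB-encoding the result; the bit-cast step
round-trips like this (sketch):

    import struct

    f = 1.5
    packed = struct.pack('>f', f)          # 4 big-endian bytes
    bits = struct.unpack('>l', packed)[0]  # reinterpret as signed int32
    back = struct.unpack('>f', struct.pack('>l', bits))[0]
    assert back == f  # exact: 1.5 is representable in binary32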
@@ -135,9 +148,10 @@ def writeFloat(fobj, f):
bits = struct.unpack('>l', packed)[0]
writeNum(fobj, bits)
-################################################################################
+##############################################################################
# Abstract section types
+
class Section(object):
def writeHeader(self, fobj, offset, size):
writeNum(fobj, offset)
@@ -163,17 +177,19 @@ class Section(object):
def copy(self):
return copy.copy(self)
+
class CompressedSection(Section):
def read(self, fobj):
fobj.seek(self.offset + self.start)
_io = StringIO.StringIO(bz2.decompress(fobj.read(self.size)))
return self.deserialize(_io)
-
+
def write(self, fobj):
_io = io.BytesIO()
self.serialize(_io)
fobj.write(bz2.compress(_io.getvalue()))
+
class MaybePooledSection(Section):
"""
A section that is normally compressed, but can optionally be
@@ -199,7 +215,7 @@ class MaybePooledSection(Section):
_io = StringIO.StringIO(bz2.decompress(fobj.read(self.size)))
self.size = len(_io.getvalue())
return self.deserialize(_io)
-
+
def write(self, fobj):
_io = StringIO.StringIO()
if self.pool_fname:
@@ -209,9 +225,10 @@ class MaybePooledSection(Section):
Section.write(self, _io)
fobj.write(bz2.compress(_io.getvalue()))
-################################################################################
+##############################################################################
# Concrete section types
-
+
+
class Header(Section):
def serialize(self, fobj):
writeString(fobj, self.disassembly_format)
@@ -225,15 +242,16 @@ class Header(Section):
def __repr__(self):
pass
+
class CounterNamePool(Section):
"""
- Maps counter names to indices. It allows later sections to refer to
+ Maps counter names to indices. It allows later sections to refer to
counters by index.
"""
def serialize(self, fobj):
- l = len(self.idx_to_name)
- writeNum(fobj, l)
- for i in xrange(l):
+ n_names = len(self.idx_to_name)
+ writeNum(fobj, n_names)
+ for i in xrange(n_names):
writeString(fobj, self.idx_to_name[i])
def deserialize(self, fobj):
@@ -241,7 +259,7 @@ class CounterNamePool(Section):
for i in xrange(readNum(fobj)):
self.idx_to_name[i] = readString(fobj)
self.name_to_idx = {v: k
- for k,v
+ for k, v
in self.idx_to_name.items()}
def upgrade(self, impl):
@@ -253,9 +271,10 @@ class CounterNamePool(Section):
keys += f['counters'].keys()
keys = sorted(set(keys))
- self.idx_to_name = {k: v for k,v in enumerate(keys)}
- self.name_to_idx = {v: k for k,v in enumerate(keys)}
-
+ self.idx_to_name = {k: v for k, v in enumerate(keys)}
+ self.name_to_idx = {v: k for k, v in enumerate(keys)}
+
+
class TopLevelCounters(Section):
def __init__(self, counter_name_pool):
self.counter_name_pool = counter_name_pool
@@ -280,7 +299,8 @@ class TopLevelCounters(Section):
new = copy.copy(self)
new.counter_name_pool = cnp
return new
-
+
+
class LineCounters(CompressedSection):
def __init__(self, impl=None):
self.impl = impl
@@ -304,9 +324,10 @@ class LineCounters(CompressedSection):
def upgrade(self, impl):
self.impl = impl
self.function_offsets = {}
-
+
def getOffsetFor(self, fname):
return self.function_offsets[fname]
+
def setOffsetFor(self, fname, value):
self.function_offsets[fname] = value
@@ -320,12 +341,13 @@ class LineCounters(CompressedSection):
for k in counters:
c[k] = readFloat(_io)
yield c
-
+
+
class LineAddresses(CompressedSection):
def __init__(self, impl=None):
self.impl = impl
self.function_offsets = {}
-
+
def serialize(self, fobj):
"""
Addresses are encoded as a delta from the previous address. This allows
@@ -340,11 +362,12 @@ class LineAddresses(CompressedSection):
self.function_offsets[fname] = fobj.tell() - start
prev_address = 0
for counters, address, text in self.impl.getCodeForFunction(fname):
- # FIXME: Hack around a bug in perf extraction somewhere - if we go
- # off the end of a symbol to a previous symbol, addresses will go backwards!
+ # FIXME: Hack around a bug in perf extraction somewhere - if
+ # we go off the end of a symbol to a previous symbol,
+ # addresses will go backwards!
writeNum(fobj, max(0, address - prev_address))
prev_address = address
-
+
def deserialize(self, fobj):
self.data = fobj.read()
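The delta encoding in LineAddresses.serialize above keeps the stored
numbers small, so they stay short under ULEB encoding; a standalone
sketch of the scheme (the max(0, ...) clamp mirrors the FIXME above):

    def delta_encode(addresses):
        # Store each address as an offset from the previous one.
        out, prev = [], 0
        for a in addresses:
            out.append(max(0, a - prev))
            prev = a
        return out

    def delta_decode(deltas):
        # Rebuild absolute addresses by accumulating the offsets.
        out, last = [], 0
        for d in deltas:
            last += d
            out.append(last)
        return out

    addrs = [0x400, 0x404, 0x408]
    assert delta_decode(delta_encode(addrs)) == addrs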
@@ -354,6 +377,7 @@ class LineAddresses(CompressedSection):
def getOffsetFor(self, fname):
return self.function_offsets[fname]
+
def setOffsetFor(self, fname, value):
self.function_offsets[fname] = value
@@ -366,13 +390,14 @@ class LineAddresses(CompressedSection):
address = readNum(_io) + last_address
last_address = address
yield address
-
+
+
class LineText(CompressedSection):
"""
Text lines (like "add r0, r0, r0") can be repeated.
Instead of just storing the text in raw form, we store pointers into
- a text pool. This allows text to be reused, but also reused between
+ a text pool. This allows text to be reused, but also reused between
different profiles if required (the text pools can be extracted
into a separate file)
"""
@@ -391,7 +416,7 @@ class LineText(CompressedSection):
self.function_offsets[fname] = fobj.tell() - start
for counters, address, text in self.impl.getCodeForFunction(fname):
writeNum(fobj, self.text_pool.getOrCreate(text))
- writeNum(fobj, 0) # Write sequence terminator
+ writeNum(fobj, 0) # Write sequence terminator
def deserialize(self, fobj):
# FIXME: Make this lazy.
@@ -400,9 +425,10 @@ class LineText(CompressedSection):
def upgrade(self, impl):
self.impl = impl
self.function_offsets = {}
-
+
def getOffsetFor(self, fname):
return self.function_offsets[fname]
+
def setOffsetFor(self, fname, value):
self.function_offsets[fname] = value
@@ -419,7 +445,8 @@ class LineText(CompressedSection):
new = copy.copy(self)
new.text_pool = tp
return new
-
+
+
class TextPool(MaybePooledSection):
def __init__(self):
MaybePooledSection.__init__(self)
@@ -440,12 +467,12 @@ class TextPool(MaybePooledSection):
def upgrade(self, impl):
pass
-
+
def getOrCreate(self, text):
if self.pool_fname and not self.pool_read:
assert False
self.readFromPool()
-
+
if text in self.offsets:
return self.offsets[text]
self.offsets[text] = self.data.tell()
@@ -458,7 +485,8 @@ class TextPool(MaybePooledSection):
def copy(self):
return copy.deepcopy(self)
-
+
+
class Functions(Section):
def __init__(self, counter_name_pool, line_counters,
line_addresses, line_text, impl=None):
@@ -508,7 +536,8 @@ class Functions(Section):
def getCodeForFunction(self, fname):
f = self.functions[fname]
- counter_gen = self.line_counters.extractForFunction(fname, f['counters'].keys())
+ counter_gen = self.line_counters \
+ .extractForFunction(fname, f['counters'].keys())
address_gen = self.line_addresses.extractForFunction(fname)
text_gen = self.line_text.extractForFunction(fname)
for n in xrange(f['length']):
@@ -522,11 +551,13 @@ class Functions(Section):
new.line_addresses = line_addresses
new.line_text = line_text
return new
-
+
+
class ProfileV2(ProfileImpl):
@staticmethod
def checkFile(fn):
- # The first number is the version (2); ULEB encoded this is simply 0x02.
+ # The first number is the version (2); ULEB encoded this is simply
+ # 0x02.
return ord(open(fn).read(1)) == 2
@staticmethod
@@ -541,9 +572,9 @@ class ProfileV2(ProfileImpl):
p.tp = TextPool()
p.lt = LineText(p.tp, p)
p.f = Functions(p.cnp, p.lc, p.la, p.lt, p)
-
+
p.sections = [p.h, p.cnp, p.tlc, p.lc, p.la, p.lt, p.tp, p.f]
-
+
version = readNum(fobj)
assert version == 2
@@ -555,7 +586,7 @@ class ProfileV2(ProfileImpl):
section.read(fobj)
return p
-
+
def serialize(self, fname=None):
# If we're not writing to a file, emulate a file object instead.
if fname is None:
@@ -577,7 +608,7 @@ class ProfileV2(ProfileImpl):
f = self.f.copy(cnp, lc, la, lt)
sections = [h, cnp, tlc, lc, la, lt, tp, f]
- writeNum(fobj, 2) # Version
+ writeNum(fobj, 2) # Version
# We need to write all sections first, so we know their offset
# before we write the header.
@@ -601,7 +632,7 @@ class ProfileV2(ProfileImpl):
assert v1impl.getVersion() == 1
p = ProfileV2()
-
+
p.h = Header()
p.cnp = CounterNamePool()
p.tlc = TopLevelCounters(p.cnp)
@@ -610,7 +641,7 @@ class ProfileV2(ProfileImpl):
p.tp = TextPool()
p.lt = LineText(p.tp, p)
p.f = Functions(p.cnp, p.lc, p.la, p.lt, p)
-
+
p.sections = [p.h, p.cnp, p.tlc, p.lc, p.la, p.lt, p.tp, p.f]
for section in p.sections:
Modified: lnt/trunk/lnt/testing/util/compilers.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/util/compilers.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/util/compilers.py (original)
+++ lnt/trunk/lnt/testing/util/compilers.py Wed Nov 8 16:27:12 2017
@@ -322,6 +322,7 @@ def infer_cxx_compiler(cc_path):
if os.path.exists(cxx_path):
return cxx_path
+
__all__ = ['get_cc_info', 'infer_cxx_compiler']
Modified: lnt/trunk/lnt/tests/builtintest.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/builtintest.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/builtintest.py (original)
+++ lnt/trunk/lnt/tests/builtintest.py Wed Nov 8 16:27:12 2017
@@ -79,8 +79,8 @@ class BuiltinTest(object):
else:
server_report = ImportData.no_submit()
if server_report:
- ImportData.print_report_result(server_report, sys.stdout, sys.stderr,
- config.verbose)
+ ImportData.print_report_result(server_report, sys.stdout,
+ sys.stderr, config.verbose)
return server_report
@staticmethod
Modified: lnt/trunk/lnt/tests/compile.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/compile.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/compile.py (original)
+++ lnt/trunk/lnt/tests/compile.py Wed Nov 8 16:27:12 2017
@@ -97,7 +97,7 @@ def runN(args, N, cwd, preprocess_cmd=No
# Otherwise, parse the timing data from runN.
try:
return eval(stdout)
- except:
+ except Exception:
fatal("failed to parse output: %s\n" % stdout)
@@ -113,7 +113,8 @@ def get_output_path(*names):
def get_runN_test_data(name, variables, cmd, ignore_stderr=False,
sample_mem=False, only_mem=False,
- stdout=None, stderr=None, preprocess_cmd=None, env=None):
+ stdout=None, stderr=None, preprocess_cmd=None,
+ env=None):
if only_mem and not sample_mem:
raise ArgumentError("only_mem doesn't make sense without sample_mem")
@@ -168,7 +169,8 @@ def test_cc_command(base_name, run_info,
# extra run just to get the memory statistics.
if opts.memory_profiling:
# Find the cc1 command, which we use to do memory profiling. To do this
- # we execute the compiler with '-###' to figure out what it wants to do.
+ # we execute the compiler with '-###' to figure out what it wants to
+ # do.
cc_output = commands.capture(cmd + ['-o', '/dev/null', '-###'],
include_stderr=True).strip()
cc_commands = []
@@ -208,8 +210,8 @@ def test_cc_command(base_name, run_info,
success = True
# For now, the way the software is set up things are going to get
- # confused if we don't report the same number of samples as reported
- # for other variables. So we just report the size N times.
+ # confused if we don't report the same number of samples as
+ # reported for other variables. So we just report the size N times.
#
# FIXME: We should resolve this, eventually.
for i in range(variables.get('run_count')):
@@ -231,20 +233,20 @@ IRGEN = "irgen"
CODEGEN = "codegen"
ASSEMBLY = "assembly"
-STAGE_TO_FLAG_MAP = {PCH_GEN: Stage(flags=['-x', 'objective-c-header'], has_output=True),
- DRIVER: Stage(flags=['-###', '-fsyntax-only'], has_output=False),
- INIT: Stage(flags=['-fsyntax-only',
- '-Xclang', '-init-only'],
- has_output=False),
- SYNTAX: Stage(flags=['-fsyntax-only'], has_output=False),
- IRGEN_ONLY: Stage(flags=['-emit-llvm', '-c',
- '-Xclang', '-emit-llvm-only'],
- has_output=False),
- IRGEN: Stage(flags=['-emit-llvm', '-c'], has_output=True),
- CODEGEN: Stage(flags=['-c', '-Xclang', '-emit-codegen-only'],
- has_output=False),
- # Object would be better name. Keep for backwards compat.
- ASSEMBLY: Stage(flags=['-c'], has_output=True)}
+STAGE_TO_FLAG_MAP = {
+ PCH_GEN: Stage(flags=['-x', 'objective-c-header'], has_output=True),
+ DRIVER: Stage(flags=['-###', '-fsyntax-only'], has_output=False),
+ INIT: Stage(flags=['-fsyntax-only', '-Xclang', '-init-only'],
+ has_output=False),
+ SYNTAX: Stage(flags=['-fsyntax-only'], has_output=False),
+ IRGEN_ONLY: Stage(flags=['-emit-llvm', '-c', '-Xclang', '-emit-llvm-only'],
+ has_output=False),
+ IRGEN: Stage(flags=['-emit-llvm', '-c'], has_output=True),
+ CODEGEN: Stage(flags=['-c', '-Xclang', '-emit-codegen-only'],
+ has_output=False),
+    # Object would be a better name. Keep for backwards compat.
+ ASSEMBLY: Stage(flags=['-c'], has_output=True),
+}
def test_compile(name, run_info, variables, input, output, pch_input,
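The rewrapped map above also switches to the hanging-indent dictionary style PEP 8 suggests for long literals: entries start on their own lines, and the trailing comma before the closing brace keeps future additions to single-line diffs. The same pattern on a hypothetical two-entry map:

    # Hypothetical example of the hanging-indent style used above.
    EXAMPLE_FLAG_MAP = {
        'syntax': ['-fsyntax-only'],
        'assembly': ['-c'],  # trailing comma: new entries touch one line
    }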
@@ -400,10 +402,10 @@ def test_build(base_name, run_info, vari
# Add the build configuration selection.
cmd.extend(('-configuration', build_config))
- cmd.append('OBJROOT=%s' % (os.path.join(build_base, 'obj')))
- cmd.append('SYMROOT=%s' % (os.path.join(build_base, 'sym')))
- cmd.append('DSTROOT=%s' % (os.path.join(build_base, 'dst')))
- cmd.append('SHARED_PRECOMPS_DIR=%s' % (os.path.join(build_base, 'pch')))
+ cmd.append('OBJROOT=%s' % os.path.join(build_base, 'obj'))
+ cmd.append('SYMROOT=%s' % os.path.join(build_base, 'sym'))
+ cmd.append('DSTROOT=%s' % os.path.join(build_base, 'dst'))
+ cmd.append('SHARED_PRECOMPS_DIR=%s' % os.path.join(build_base, 'pch'))
# Add arguments to force the appropriate compiler.
cmd.append('CC=%s' % (opts.cc,))
@@ -419,15 +421,16 @@ def test_build(base_name, run_info, vari
# (we don't want to obscure what we are trying to time).
cmd.append('RUN_CLANG_STATIC_ANALYZER=NO')
- # Inhibit all warnings, we don't want to count the time to generate them
- # against newer compilers which have added (presumably good) warnings.
+ # Inhibit all warnings, we don't want to count the time to generate
+ # them against newer compilers which have added (presumably good)
+ # warnings.
cmd.append('GCC_WARN_INHIBIT_ALL_WARNINGS=YES')
# Add additional arguments to force the build scenario we want.
cmd.extend(('-jobs', str(num_jobs)))
- # If the user specifies any additional options to be included on the command line,
- # append them here.
+ # If the user specifies any additional options to be included on the
+ # command line, append them here.
cmd.extend(build_info.get('extra_args', []))
# If the user specifies any extra environment variables, put
@@ -445,18 +448,21 @@ def test_build(base_name, run_info, vari
elif build_info['style'] == 'make':
# Get the subdirectory in Source where our sources exist.
- src_dir = os.path.dirname(os.path.join(source_path, build_info['file']))
- # Grab our config from build_info. This is config is currently only used in
- # the make build style since Xcode, the only other build style as of today,
- # handles changing configuration through the configuration type variables.
- # Make does not do this so we have to use more brute force to get it right.
+ src_dir = os.path.dirname(os.path.join(source_path,
+ build_info['file']))
+        # Grab our config from build_info. This config is currently only
+ # used in the make build style since Xcode, the only other build style
+ # as of today, handles changing configuration through the configuration
+ # type variables. Make does not do this so we have to use more brute
+ # force to get it right.
config = build_info.get('config', {}).get(build_config, {})
# Copy our source directory over to build_base.
- # We do this since we assume that we are processing a make project which
- # has already been configured and so that we do not need to worry about
- # make install or anything like that. We can just build the project and
- # use the user supplied path to its location in the build directory.
+ # We do this since we assume that we are processing a make project
+ # which has already been configured and so that we do not need to worry
+ # about make install or anything like that. We can just build the
+ # project and use the user supplied path to its location in the build
+ # directory.
copied_src_dir = os.path.join(build_base, os.path.basename(dir_name))
shutil.copytree(src_dir, copied_src_dir)
@@ -464,8 +470,8 @@ def test_build(base_name, run_info, vari
cmd.extend(['make', '-C', copied_src_dir, build_info['target'], "-j",
str(num_jobs)])
- # If the user specifies any additional options to be included on the command line,
- # append them here.
+ # If the user specifies any additional options to be included on the
+ # command line, append them here.
cmd.extend(config.get('extra_args', []))
# If the user specifies any extra environment variables, put
@@ -510,9 +516,10 @@ def test_build(base_name, run_info, vari
samples = []
try:
- # We use a dictionary here for our formatted processing of binary_path so
- # that if the user needs our build config he can get it via %(build_config)s
- # in his string and if he does not, an error is not thrown.
+ # We use a dictionary here for our formatted processing of
+ # binary_path so that if the user needs our build config he can get
+ # it via %(build_config)s in his string and if he does not, an
+ # error is not thrown.
format_args = {"build_config": build_config}
cmd = codesize_util + [os.path.join(build_base,
binary_path % format_args)]
@@ -524,9 +531,10 @@ def test_build(base_name, run_info, vari
bytes = long(result)
success = True
- # For now, the way the software is set up things are going to get
- # confused if we don't report the same number of samples as reported
- # for other variables. So we just report the size N times.
+ # For now, the way the software is set up things are going to
+ # get confused if we don't report the same number of samples
+ # as reported for other variables. So we just report the size
+ # N times.
#
# FIXME: We should resolve this, eventually.
for i in range(variables.get('run_count')):
@@ -603,9 +611,9 @@ def get_single_file_tests(flags_to_test,
# the nature of python generators I can not just return in the previous
# warning case.
for f in flags_to_test:
- # FIXME: Note that the order matters here, because we need to make sure
- # to generate the right PCH file before we try to use it. Ideally the
- # testing infrastructure would just handle this.
+ # FIXME: Note that the order matters here, because we need to make
+ # sure to generate the right PCH file before we try to use it.
+ # Ideally the testing infrastructure would just handle this.
for pch in all_pch:
path, name, output = pch['path'], pch['name'], pch['output']
@@ -688,10 +696,10 @@ TODO:
o PCH Utilization
FIXME: One major hole here is that we aren't testing one situation which does
-sometimes show up with PCH, where we have a PCH file + a second significant body
-of code (e.g., a large user framework, or a poorly PCHified project). In
-practice, this can be a significant hole because PCH has a substantial impact on
-how lookup, for example, is done.
+sometimes show up with PCH, where we have a PCH file + a second significant
+body of code (e.g., a large user framework, or a poorly PCHified project). In
+practice, this can be a significant hole because PCH has a substantial impact
+on how lookup, for example, is done.
We run each of the tests above in a number of dimensions:
- O0
@@ -728,7 +736,6 @@ class CompileTest(builtintest.BuiltinTes
if opts.cxx is None:
self._fatal('--cxx is required (and could not be inferred)')
-
# Force the CC and CXX variables to be absolute paths.
cc_abs = os.path.abspath(commands.which(opts.cc))
cxx_abs = os.path.abspath(commands.which(opts.cxx))
@@ -778,22 +785,25 @@ class CompileTest(builtintest.BuiltinTes
def setup_log(output_dir):
def stderr_log_handler():
h = logging.StreamHandler()
- f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
- "%Y-%m-%d %H:%M:%S")
+ f = logging.Formatter(
+ "%(asctime)-7s: %(levelname)s: %(message)s",
+ "%Y-%m-%d %H:%M:%S")
h.setFormatter(f)
return h
def file_log_handler(path):
h = logging.FileHandler(path, mode='w')
- f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
- "%Y-%m-%d %H:%M:%S")
+ f = logging.Formatter(
+ "%(asctime)-7s: %(levelname)s: %(message)s",
+ "%Y-%m-%d %H:%M:%S")
h.setFormatter(f)
return h
- l = logging.Logger('compile_test')
- l.setLevel(logging.INFO)
- l.addHandler(file_log_handler(os.path.join(output_dir, 'test.log')))
- l.addHandler(stderr_log_handler())
- return l
+ log = logging.Logger('compile_test')
+ log.setLevel(logging.INFO)
+ log.addHandler(file_log_handler(os.path.join(output_dir,
+ 'test.log')))
+ log.addHandler(stderr_log_handler())
+ return log
g_log = setup_log(g_output_dir)
# Collect machine and run information.
@@ -805,7 +815,8 @@ class CompileTest(builtintest.BuiltinTes
# FIXME: Get more machine information? Cocoa.h hash, for example.
for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
- ('sys_as_version', ('/usr/bin/as', '-v', '/dev/null')),
+ ('sys_as_version',
+ ('/usr/bin/as', '-v', '/dev/null')),
('sys_ld_version', ('/usr/bin/ld', '-v')),
('sys_xcodebuild', ('xcodebuild', '-version'))):
run_info[name] = commands.capture(cmd, include_stderr=True).strip()
@@ -909,8 +920,8 @@ class CompileTest(builtintest.BuiltinTes
for filter in test_filters
if filter.search(test[0])])]
if not tests_to_run:
- self._fatal(
- "no tests requested (invalid --test or --test-filter options)!")
+ self._fatal("no tests requested "
+ "(invalid --test or --test-filter options)!")
# Ensure output directory is available.
if not os.path.exists(g_output_dir):
@@ -971,6 +982,7 @@ class CompileTest(builtintest.BuiltinTes
return server_report
+
# FIXME: an equivalent to argparse's add_argument_group is not implemented
# on click. Need to review it when such functionality is available.
# https://github.com/pallets/click/issues/373
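Most of the E501 fixes in this file use the two continuation styles seen above: implicit continuation inside parentheses (preferred by PEP 8) and, where adding parentheses would hurt readability, an explicit backslash. A small sketch of both, with made-up values:

    def render(fmt, *args):
        return fmt % args

    # Implicit continuation: the open parenthesis carries the statement
    # across lines, no backslash needed.
    message = render('%s: running: %s',
                     '2017-11-08', 'make -k report')

    # Explicit backslash continuation, used where wrapping inside new
    # parentheses would obscure the expression.
    is_verbose_run = message.startswith('2017') and \
        'report' in message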
Modified: lnt/trunk/lnt/tests/nt.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/nt.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/nt.py (original)
+++ lnt/trunk/lnt/tests/nt.py Wed Nov 8 16:27:12 2017
@@ -68,7 +68,8 @@ class TestModule(object):
print >>self.log, cmdstr
self._log.flush()
- p = subprocess.Popen(args, stdout=self._log, stderr=self._log, **kwargs)
+ p = subprocess.Popen(args, stdout=self._log, stderr=self._log,
+ **kwargs)
return p.wait()
def get_time(self):
@@ -346,8 +347,8 @@ class TestConfiguration(object):
make_variables['LD_ENV_OVERRIDES'] = (
'env DYLD_LIBRARY_PATH=%s' % os.path.dirname(
self.liblto_path))
- # If ref input is requested for SPEC, we wil run SPEC through its new test
- # module so skip SPEC as part of NT.
+        # If ref input is requested for SPEC, we will run SPEC through its new
+ # test module so skip SPEC as part of NT.
if self.test_spec_ref:
make_variables['USE_SPEC_TEST_MODULE'] = '1'
@@ -374,13 +375,14 @@ class TestConfiguration(object):
# CC_UNDER_TEST_TARGET_IS_ARMV7).
if '-' in cc_info.get('cc_target', ''):
arch_name = cc_info.get('cc_target').split('-', 1)[0]
- make_variables['CC_UNDER_TEST_TARGET_IS_' + arch_name.upper()] = '1'
+ CC_VAR = 'CC_UNDER_TEST_TARGET_IS_' + arch_name.upper()
+ make_variables[CC_VAR] = '1'
# Set LLVM_RELEASE_IS_PLUS_ASSERTS when appropriate, to allow
# testing older LLVM source trees.
llvm_source_version = self.llvm_source_version
- if (llvm_source_version and llvm_source_version.isdigit() and
- int(llvm_source_version) < 107758):
+ if llvm_source_version and llvm_source_version.isdigit() and \
+ int(llvm_source_version) < 107758:
make_variables['LLVM_RELEASE_IS_PLUS_ASSERTS'] = 1
# Set ARCH appropriately, based on the inferred target.
@@ -399,8 +401,8 @@ class TestConfiguration(object):
# FIXME: Clean this up once everyone is on 'lnt runtest
# nt' style nightly testing.
arch = cc_target.split('-', 1)[0].lower()
- if (len(arch) == 4 and arch[0] == 'i' and arch.endswith('86') and
- arch[1] in '3456789'): # i[3-9]86
+ if len(arch) == 4 and arch[0] == 'i' and arch.endswith('86') and \
+ arch[1] in '3456789': # i[3-9]86
llvm_arch = 'x86'
elif arch in ('x86_64', 'amd64'):
llvm_arch = 'x86_64'
@@ -458,12 +460,14 @@ class TestConfiguration(object):
###
+
def scan_for_test_modules(config):
base_modules_path = os.path.join(config.test_suite_root, 'LNTBased')
if config.only_test is None:
test_modules_path = base_modules_path
elif config.only_test.startswith('LNTBased'):
- test_modules_path = os.path.join(config.test_suite_root, config.only_test)
+ test_modules_path = os.path.join(config.test_suite_root,
+ config.only_test)
else:
return
@@ -471,8 +475,8 @@ def scan_for_test_modules(config):
# various "suites" of LNTBased tests in separate repositories, and allowing
# users to just checkout them out elsewhere and link them into their LLVM
# test-suite source tree.
- for dirpath,dirnames,filenames in os.walk(test_modules_path,
- followlinks = True):
+ for dirpath, dirnames, filenames in os.walk(test_modules_path,
+ followlinks=True):
# Ignore the example tests, unless requested.
if not config.include_test_examples and 'Examples' in dirnames:
dirnames.remove('Examples')
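Two whitespace checks meet in this hunk: E231 wants a space after each comma in the tuple unpacking, and E251 forbids spaces around `=` when `followlinks=True` is passed as a keyword argument. Sketched in isolation:

    import os

    # E231: 'dirpath, dirnames, filenames', not 'dirpath,dirnames,...';
    # E251: 'followlinks=True', not 'followlinks = True'.
    for dirpath, dirnames, filenames in os.walk('.', followlinks=True):
        break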
@@ -492,30 +496,32 @@ def scan_for_test_modules(config):
assert dirpath.startswith(base_modules_path + '/')
yield dirpath[len(base_modules_path) + 1:]
+
def execute_command(test_log, basedir, args, report_dir):
- logfile = test_log
+ logfile = test_log
+
+ if report_dir is not None:
+ logfile = subprocess.PIPE
+ # Open a duplicated logfile at the global dir.
+ _, logname = os.path.split(test_log.name)
+ global_log_path = os.path.join(report_dir, logname)
+ global_log = open(global_log_path, 'a+')
+
+ p = subprocess.Popen(args=args, stdin=None, stdout=logfile,
+ stderr=subprocess.STDOUT, cwd=basedir,
+ env=os.environ)
+
+ if report_dir is not None:
+ while p.poll() is None:
+ line = p.stdout.readline()
+ if len(line) > 0:
+ test_log.write(line)
+ global_log.write(line)
- if report_dir is not None:
- logfile = subprocess.PIPE
- # Open a duplicated logfile at the global dir.
- _, logname = os.path.split(test_log.name)
- global_log_path = os.path.join(report_dir, logname)
- global_log = open(global_log_path, 'a+')
-
- p = subprocess.Popen(args=args, stdin=None, stdout=logfile,
- stderr=subprocess.STDOUT, cwd=basedir,
- env=os.environ)
-
- if report_dir is not None:
- while p.poll() is None:
- l = p.stdout.readline()
- if len(l) > 0:
- test_log.write(l)
- global_log.write(l)
+ global_log.close()
- global_log.close()
+ return p.wait()
- return p.wait()
# FIXME: Support duplicate logfiles to global directory.
def execute_test_modules(test_log, test_modules, test_module_variables,
@@ -537,7 +543,7 @@ def execute_test_modules(test_log, test_
module_file = open(module_path)
try:
exec module_file in locals, globals
- except:
+ except Exception:
info = traceback.format_exc()
fatal("unable to import test module: %r\n%s" % (
module_path, info))
@@ -549,7 +555,7 @@ def execute_test_modules(test_log, test_
module_path,))
try:
test_instance = test_class()
- except:
+ except Exception:
info = traceback.format_exc()
fatal("unable to instantiate test class for: %r\n%s" % (
module_path, info))
@@ -558,7 +564,8 @@ def execute_test_modules(test_log, test_
fatal("invalid test class (expected lnt.tests.nt.TestModule "
"subclass) for: %r" % module_path)
- # Create the per test variables, and ensure the output directory exists.
+ # Create the per test variables, and ensure the output directory
+ # exists.
variables = test_module_variables.copy()
variables['MODULENAME'] = name
variables['SRCROOT'] = test_path
@@ -567,8 +574,9 @@ def execute_test_modules(test_log, test_
# Execute the tests.
try:
- test_samples = test_instance._execute_test(test_log, variables, make_variables, config)
- except:
+ test_samples = test_instance._execute_test(test_log, variables,
+ make_variables, config)
+ except Exception:
info = traceback.format_exc()
fatal("exception executing tests for: %r\n%s" % (
module_path, info))
@@ -581,7 +589,7 @@ def execute_test_modules(test_log, test_
if not isinstance(item, lnt.testing.TestSamples):
is_ok = False
break
- except:
+ except Exception:
is_ok = False
if not is_ok:
fatal("test module did not return samples list: %r" % (
@@ -591,23 +599,26 @@ def execute_test_modules(test_log, test_
return results
+
def compute_test_module_variables(make_variables, config):
- # Set the test module options, which we try and restrict to a tighter subset
- # than what we pass to the LNT makefiles.
+ # Set the test module options, which we try and restrict to a tighter
+ # subset than what we pass to the LNT makefiles.
test_module_variables = {
- 'CC' : make_variables['TARGET_LLVMGCC'],
- 'CXX' : make_variables['TARGET_LLVMGXX'],
- 'CFLAGS' : (make_variables['TARGET_FLAGS'] + ' ' +
- make_variables['OPTFLAGS']),
- 'CXXFLAGS' : (make_variables['TARGET_FLAGS'] + ' ' +
- make_variables['OPTFLAGS']) }
+ 'CC': make_variables['TARGET_LLVMGCC'],
+ 'CXX': make_variables['TARGET_LLVMGXX'],
+ 'CFLAGS': (make_variables['TARGET_FLAGS'] + ' ' +
+ make_variables['OPTFLAGS']),
+ 'CXXFLAGS': (make_variables['TARGET_FLAGS'] + ' ' +
+ make_variables['OPTFLAGS']),
+ }
# Add the remote execution variables.
if config.remote:
test_module_variables['REMOTE_HOST'] = make_variables['REMOTE_HOST']
test_module_variables['REMOTE_USER'] = make_variables['REMOTE_USER']
test_module_variables['REMOTE_PORT'] = make_variables['REMOTE_PORT']
- test_module_variables['REMOTE_CLIENT'] = make_variables['REMOTE_CLIENT']
+ test_module_variables['REMOTE_CLIENT'] = \
+ make_variables['REMOTE_CLIENT']
# Add miscellaneous optional variables.
if 'LD_ENV_OVERRIDES' in make_variables:
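The dictionary cleanup above also clears E203 (whitespace before the colon in `'CC' :`) on top of the usual rewrapping; pycodestyle wants no space before a dict key's colon and one space after it. A minimal sketch:

    # E203: no space before ':'; E231: one space after it.
    flags = {'CC': 'clang', 'CXX': 'clang++'}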
@@ -628,16 +639,17 @@ def compute_test_module_variables(make_v
# We pass the test execution values as variables too, this might be better
# passed as actual arguments.
test_module_variables['THREADS'] = config.threads
- test_module_variables['BUILD_THREADS'] = config.build_threads or \
- config.threads
+ test_module_variables['BUILD_THREADS'] = \
+ config.build_threads or config.threads
return test_module_variables
+
def execute_nt_tests(test_log, make_variables, basedir, config):
report_dir = config.report_dir
common_args = ['make', '-k']
- common_args.extend('%s=%s' % (k,v) for k,v in make_variables.items())
+ common_args.extend('%s=%s' % (k, v) for k, v in make_variables.items())
if config.only_test is not None:
- common_args.extend(['-C',config.only_test])
+ common_args.extend(['-C', config.only_test])
# If we are using isolation, run under sandbox-exec.
if config.use_isolation:
@@ -667,25 +679,27 @@ def execute_nt_tests(test_log, make_vari
(regex #"^/private/var/folders/")
(regex #"^/dev/")
(regex #"^%s"))""" % (basedir,)
- common_args = ['sandbox-exec', '-f', sandbox_profile_path] + common_args
+ common_args = ['sandbox-exec', '-f', sandbox_profile_path] +\
+ common_args
# Run a separate 'make build' step if --build-threads was given.
if config.build_threads > 0:
- args = common_args + ['-j', str(config.build_threads), 'build']
- print >>test_log, '%s: running: %s' % (timestamp(),
- ' '.join('"%s"' % a
- for a in args))
- test_log.flush()
-
- print >>sys.stderr, '%s: building "nightly tests" with -j%u...' % (
- timestamp(), config.build_threads)
- res = execute_command(test_log, basedir, args, report_dir)
- if res != 0:
- print >> sys.stderr, "Failure while running make build! See log: %s"%(test_log.name)
+ args = common_args + ['-j', str(config.build_threads), 'build']
+ print >>test_log, '%s: running: %s' % (timestamp(),
+ ' '.join('"%s"' % a
+ for a in args))
+ test_log.flush()
+
+ print >>sys.stderr, '%s: building "nightly tests" with -j%u...' % (
+ timestamp(), config.build_threads)
+ res = execute_command(test_log, basedir, args, report_dir)
+ if res != 0:
+ print >> sys.stderr, "Failure while running make build! " \
+ "See log: %s" % test_log.name
# Then 'make report'.
args = common_args + ['-j', str(config.threads),
- 'report', 'report.%s.csv' % config.test_style]
+ 'report', 'report.%s.csv' % config.test_style]
print >>test_log, '%s: running: %s' % (timestamp(),
' '.join('"%s"' % a
for a in args))
@@ -700,9 +714,12 @@ def execute_nt_tests(test_log, make_vari
res = execute_command(test_log, basedir, args, report_dir)
if res != 0:
- print >> sys.stderr, "Failure while running nightly tests! See log: %s" % (test_log.name)
+ print >> sys.stderr, "Failure while running nightly tests! "\
+ "See log: %s" % test_log.name
-# Keep a mapping of mangled test names, to the original names in the test-suite.
+
+# Keep a mapping of mangled test names, to the original names in the
+# test-suite.
TEST_TO_NAME = {}
KNOWN_SAMPLE_KEYS = ('compile', 'exec', 'hash',
'gcc.compile', 'bc.compile', 'llc.compile',
@@ -782,7 +799,7 @@ def load_nt_report_file(report_path, con
program = os.path.join(config.rerun_test, program)
program_real = program
- program_mangled = program.replace('.','_')
+ program_mangled = program.replace('.', '_')
test_base_name = program_mangled
# Check if this is a subtest result, in which case we ignore missing
@@ -846,6 +863,7 @@ def load_nt_report_file(report_path, con
return test_samples, no_errors
+
def prepare_report_dir(config):
# Set up the sandbox.
sandbox_path = config.sandbox_path
@@ -867,7 +885,8 @@ def prepare_report_dir(config):
if needs_clean and config.timestamp_build:
fatal('refusing to reuse pre-existing build dir %r' % report_dir)
-def prepare_build_dir(config, iteration) :
+
+def prepare_build_dir(config, iteration):
# report_dir is supposed to be canonicalized, so we do not need to
# call os.path.realpath before mkdir.
build_dir = config.build_dir(iteration)
@@ -885,12 +904,13 @@ def prepare_build_dir(config, iteration)
fatal('refusing to reuse pre-existing build dir %r' % build_dir)
return build_dir
+
def update_tools(make_variables, config, iteration):
"""Update the test suite tools. """
print >>sys.stderr, '%s: building test-suite tools' % (timestamp(),)
args = ['make', 'tools']
- args.extend('%s=%s' % (k,v) for k,v in make_variables.items())
+ args.extend('%s=%s' % (k, v) for k, v in make_variables.items())
build_tools_log_path = os.path.join(config.build_dir(iteration),
'build-tools.log')
build_tools_log = open(build_tools_log_path, 'w')
@@ -902,7 +922,9 @@ def update_tools(make_variables, config,
args, config.report_dir)
build_tools_log.close()
if res != 0:
- fatal('Unable to build tools, aborting! See log: %s'%(build_tools_log_path))
+ fatal('Unable to build tools, aborting! See log: %s' %
+ build_tools_log_path)
+
def configure_test_suite(config, iteration):
"""Run configure on the test suite."""
@@ -912,7 +934,7 @@ def configure_test_suite(config, iterati
configure_log = open(configure_log_path, 'w')
args = [os.path.realpath(os.path.join(config.test_suite_root,
- 'configure'))]
+ 'configure'))]
if config.without_llvm:
args.extend(['--without-llvmsrc', '--without-llvmobj'])
else:
@@ -936,6 +958,7 @@ def configure_test_suite(config, iterati
if res != 0:
fatal('Configure failed, log is here: %r' % configure_log_path)
+
def copy_missing_makefiles(config, basedir):
"""When running with only_test something, makefiles will be missing,
so copy them into place. """
@@ -950,6 +973,7 @@ def copy_missing_makefiles(config, based
shutil.copyfile(os.path.join(src_path, 'Makefile'),
os.path.join(obj_path, 'Makefile'))
+
def run_test(nick_prefix, iteration, config):
print >>sys.stderr, "%s: checking source versions" % (
timestamp(),)
@@ -961,7 +985,8 @@ def run_test(nick_prefix, iteration, con
# Compute the test module variables, which are a restricted subset of the
# make variables.
- test_module_variables = compute_test_module_variables(make_variables, config)
+ test_module_variables = compute_test_module_variables(make_variables,
+ config)
# Scan for LNT-based test modules.
print >>sys.stderr, "%s: scanning for LNT-based test modules" % (
@@ -986,7 +1011,6 @@ def run_test(nick_prefix, iteration, con
start_time = timestamp()
print >>sys.stderr, '%s: starting test in %r' % (start_time, basedir)
-
# Configure the test suite.
if config.run_configure or not os.path.exists(os.path.join(
basedir, 'Makefile.config')):
@@ -994,14 +1018,15 @@ def run_test(nick_prefix, iteration, con
# If running with --only-test, creating any dirs which might be missing and
# copy Makefiles.
- if config.only_test is not None and not config.only_test.startswith("LNTBased"):
+ if config.only_test is not None and \
+ not config.only_test.startswith("LNTBased"):
copy_missing_makefiles(config, basedir)
# If running without LLVM, make sure tools are up to date.
if config.without_llvm:
update_tools(make_variables, config, iteration)
- # Always blow away any existing report.
+ # Always blow away any existing report.
build_report_path = config.build_report_path(iteration)
if os.path.exists(build_report_path):
os.remove(build_report_path)
@@ -1043,7 +1068,7 @@ def run_test(nick_prefix, iteration, con
# Merge in the test samples from all of the test modules.
existing_tests = set(s.name for s in test_samples)
- for module,results in test_module_results:
+ for module, results in test_module_results:
for s in results:
if s.name in existing_tests:
fatal("test module %r added duplicate test: %r" % (
@@ -1056,9 +1081,9 @@ def run_test(nick_prefix, iteration, con
#
# FIXME: Import full range of data that the Clang tests are using?
machine_info = {}
- machine_info['hardware'] = capture(["uname","-m"],
+ machine_info['hardware'] = capture(["uname", "-m"],
include_stderr=True).strip()
- machine_info['os'] = capture(["uname","-sr"], include_stderr=True).strip()
+ machine_info['os'] = capture(["uname", "-sr"], include_stderr=True).strip()
if config.cc_reference is not None:
machine_info['gcc_version'] = capture(
[config.cc_reference, '--version'],
@@ -1098,11 +1123,13 @@ def run_test(nick_prefix, iteration, con
else:
machdep_info = run_info
- machdep_info['uname'] = capture(["uname","-a"], include_stderr=True).strip()
- machdep_info['name'] = capture(["uname","-n"], include_stderr=True).strip()
+ machdep_info['uname'] = capture(["uname", "-a"],
+ include_stderr=True).strip()
+ machdep_info['name'] = capture(["uname", "-n"],
+ include_stderr=True).strip()
- # FIXME: Hack, use better method of getting versions. Ideally, from binaries
- # so we are more likely to be accurate.
+ # FIXME: Hack, use better method of getting versions. Ideally, from
+ # binaries so we are more likely to be accurate.
if config.llvm_source_version is not None:
run_info['llvm_revision'] = config.llvm_source_version
run_info['test_suite_revision'] = test_suite_source_version
@@ -1117,17 +1144,17 @@ def run_test(nick_prefix, iteration, con
run_info['run_order'] = config.cc_info['inferred_run_order']
# Add any user specified parameters.
- for target,params in ((machine_info, config.machine_parameters),
- (run_info, config.run_parameters)):
+ for target, params in ((machine_info, config.machine_parameters),
+ (run_info, config.run_parameters)):
for entry in params:
if '=' not in entry:
- name,value = entry,''
+ name, value = entry, ''
else:
- name,value = entry.split('=', 1)
+ name, value = entry.split('=', 1)
if name in target:
logger.warning("parameter %r overwrote existing value: %r" %
(name, target.get(name)))
- print target,name,value
+ print target, name, value
target[name] = value
# Generate the test report.
@@ -1135,24 +1162,26 @@ def run_test(nick_prefix, iteration, con
print >>sys.stderr, '%s: generating report: %r' % (timestamp(),
lnt_report_path)
machine = lnt.testing.Machine(nick, machine_info)
- run = lnt.testing.Run(start_time, end_time, info = run_info)
+ run = lnt.testing.Run(start_time, end_time, info=run_info)
report = lnt.testing.Report(machine, run, test_samples)
lnt_report_file = open(lnt_report_path, 'w')
- print >>lnt_report_file,report.render()
+ print >>lnt_report_file, report.render()
lnt_report_file.close()
return report
###
+
def _construct_report_path(basedir, only_test, test_style, file_type="csv"):
"""Get the full path to report files in the sandbox.
"""
report_path = os.path.join(basedir)
if only_test is not None:
- report_path = os.path.join(report_path, only_test)
- report_path = os.path.join(report_path, ('report.%s.' % test_style) + file_type)
+ report_path = os.path.join(report_path, only_test)
+ report_path = os.path.join(report_path,
+ ('report.%s.' % test_style) + file_type)
return report_path
@@ -1173,8 +1202,8 @@ def rerun_test(config, name, num_times):
test_full_path = os.path.join(
config.report_dir, relative_test_path)
- assert os.path.exists(test_full_path), "Previous test directory not there?" + \
- test_full_path
+ assert os.path.exists(test_full_path), \
+ "Previous test directory not there?" + test_full_path
results = []
no_errors = True
@@ -1185,7 +1214,8 @@ def rerun_test(config, name, num_times):
results.extend(test_results)
# Check we got an exec and status from each run.
- assert len(results) >= num_times, "Did not get all the runs?" + str(results)
+ assert len(results) >= num_times, \
+ "Did not get all the runs?" + str(results)
logfile.close()
return results, no_errors
@@ -1210,7 +1240,8 @@ def _prepare_testsuite_for_rerun(test_na
os.remove(path)
-def _execute_test_again(config, test_name, test_path, test_relative_path, logfile):
+def _execute_test_again(config, test_name, test_path, test_relative_path,
+ logfile):
"""(Re)Execute the benchmark of interest. """
_prepare_testsuite_for_rerun(test_name, test_path, config)
@@ -1229,36 +1260,40 @@ def _execute_test_again(config, test_nam
config.rerun_test = test_relative_path
# The target for the specific benchmark.
# Make target.
- benchmark_report_target = "Output/" + test_name + \
+ benchmark_report_target = "Output/" + test_name + \
"." + config.test_style + ".report.txt"
# Actual file system location of the target.
- benchmark_report_path = os.path.join(config.build_dir(None),
- test_path,
- benchmark_report_target)
+ benchmark_report_path = os.path.join(config.build_dir(None),
+ test_path,
+ benchmark_report_target)
to_exec.append(benchmark_report_target)
returncode = execute_command(logfile,
- config.build_dir(None), to_exec, config.report_dir)
+ config.build_dir(None), to_exec,
+ config.report_dir)
assert returncode == 0, "Remake command failed."
assert os.path.exists(benchmark_report_path), "Missing " \
"generated report: " + benchmark_report_path
# Now we need to pull out the results into the CSV format LNT can read.
schema = os.path.join(config.test_suite_root,
- "TEST." + config.test_style + ".report")
- result_path = os.path.join(config.build_dir(None),
- test_path, "Output",
- test_name + "." + config.test_style + ".report.csv")
+ "TEST." + config.test_style + ".report")
+ result_path = os.path.join(config.build_dir(None),
+ test_path, "Output",
+ test_name + "." + config.test_style +
+ ".report.csv")
gen_report_template = "{gen} -csv {schema} < {input} > {output}"
gen_cmd = gen_report_template.format(gen=config.generate_report_script,
- schema=schema, input=benchmark_report_path, output=result_path)
- bash_gen_cmd = ["/bin/bash", "-c", gen_cmd]
+ schema=schema,
+ input=benchmark_report_path,
+ output=result_path)
+ bash_gen_cmd = ["/bin/bash", "-c", gen_cmd]
assert not os.path.exists(result_path), "Results should not exist yet." + \
result_path
- returncode = execute_command(logfile,
- config.build_dir(None), bash_gen_cmd, config.report_dir)
+ returncode = execute_command(logfile, config.build_dir(None), bash_gen_cmd,
+ config.report_dir)
assert returncode == 0, "command failed"
assert os.path.exists(result_path), "Missing results file."
@@ -1266,9 +1301,11 @@ def _execute_test_again(config, test_nam
assert len(results) > 0
return results, no_errors
+
def _unix_quote_args(s):
return map(pipes.quote, shlex.split(s))
+
# When set to true, all benchmarks will be rerun.
# TODO: remove me when rerun patch is done.
NUMBER_OF_RERUNS = 4
@@ -1419,9 +1456,9 @@ def _process_reruns(config, server_reply
if SERVER_COMPILE_RESULT in test_type:
if new_entry.compile_status is None:
new_entry.compile_status = results_status
- elif SERVER_EXEC_RESULT in test_type or \
- SERVER_SCORE_RESULT in test_type or \
- SERVER_MEM_RESULT in test_type:
+ elif (SERVER_EXEC_RESULT in test_type or
+ SERVER_SCORE_RESULT in test_type or
+ SERVER_MEM_RESULT in test_type):
if new_entry.execution_status is None:
# If the server has not seen the test before, it will return
# None for the performance results analysis. In this case we
@@ -1480,8 +1517,8 @@ Basic usage:
--test-suite ~/llvm-test-suite
where --sandbox is the directory to build and store results in, --cc and --cxx
-are the full paths to the compilers to test, and --test-suite is the path to the
-test-suite source.
+are the full paths to the compilers to test, and --test-suite is the path to
+the test-suite source.
To do a quick test, you can add something like:
@@ -1553,8 +1590,9 @@ class NTTest(builtintest.BuiltinTest):
# If there was no --cxx given, attempt to infer it from the --cc.
if opts.cxx_under_test is None:
- opts.cxx_under_test = lnt.testing.util.compilers.infer_cxx_compiler(
- opts.cc_under_test)
+ opts.cxx_under_test = \
+ lnt.testing.util.compilers.infer_cxx_compiler(
+ opts.cc_under_test)
if opts.cxx_under_test is not None:
logger.info("inferred C++ compiler under test as: %r" %
(opts.cxx_under_test,))
@@ -1583,7 +1621,7 @@ class NTTest(builtintest.BuiltinTest):
# given a C++ compiler that doesn't exist, reset it to just use the
# given C compiler.
if not os.path.exists(opts.cxx_under_test):
- logger.warning("invalid cxx_under_test, " +
+ logger.warning("invalid cxx_under_test, " +
"falling back to cc_under_test")
opts.cxx_under_test = opts.cc_under_test
@@ -1624,7 +1662,8 @@ class NTTest(builtintest.BuiltinTest):
self._fatal('--remote is required with --remote-user')
if opts.spec_with_pgo and not opts.test_spec_ref:
- self._fatal('--spec-with-pgo is only supported with --spec-with-ref')
+ self._fatal('--spec-with-pgo is only supported with '
+ '--spec-with-ref')
# libLTO should exist, if given.
if opts.liblto_path:
@@ -1632,7 +1671,8 @@ class NTTest(builtintest.BuiltinTest):
self._fatal('invalid --liblto-path argument %r' % (
opts.liblto_path,))
- # Support disabling test suite externals separately from providing path.
+ # Support disabling test suite externals separately from providing
+ # path.
if not opts.test_externals:
opts.test_suite_externals = '/dev/null'
else:
@@ -1654,8 +1694,8 @@ class NTTest(builtintest.BuiltinTest):
# test-suite directory, that borks things. <rdar://problem/7876418>
prepare_report_dir(config)
- # These notes are used by the regression tests to check if we've handled
- # flags correctly.
+ # These notes are used by the regression tests to check if we've
+ # handled flags correctly.
logger.info('TARGET_FLAGS: {}'.format(' '.join(config.target_flags)))
if config.qemu_user_mode:
logger.info('QEMU_USER_MODE_COMMAND: {}'
@@ -1668,8 +1708,8 @@ class NTTest(builtintest.BuiltinTest):
reports = []
for i in range(opts.multisample):
- print >>sys.stderr, "%s: (multisample) running iteration %d" % (
- timestamp(), i)
+ print >>sys.stderr, "%s: (multisample) running iteration %d" %\
+ (timestamp(), i)
report = run_test(opts.label, i, config)
reports.append(report)
@@ -1746,7 +1786,8 @@ class NTTest(builtintest.BuiltinTest):
import lnt.server.db.v4db
import lnt.server.config
db = lnt.server.db.v4db.V4DB("sqlite:///:memory:",
- lnt.server.config.Config.dummy_instance())
+ lnt.server.config.Config
+ .dummy_instance())
session = db.make_session()
result = lnt.util.ImportData.import_and_report(
None, None, db, session, report_path, 'json', 'nts')
@@ -1790,6 +1831,7 @@ def _tools_check():
if status > 0:
raise SystemExit("""error: tclsh not available on your system.""")
+
# FIXME: an equivalent to argparse's add_argument_group is not implemented
# on click. Need to review it when such functionality is available.
# https://github.com/pallets/click/issues/373
Modified: lnt/trunk/lnt/tests/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/test_suite.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/test_suite.py (original)
+++ lnt/trunk/lnt/tests/test_suite.py Wed Nov 8 16:27:12 2017
@@ -36,6 +36,10 @@ from lnt.tests.builtintest import Builti
TEST_SUITE_KNOWN_ARCHITECTURES = ['ARM', 'AArch64', 'Mips', 'X86']
KNOWN_SAMPLE_KEYS = ['compile', 'exec', 'hash', 'score']
+_LNT_CODES = {
+
+}
+
XML_REPORT_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites>
{% for suite in suites %}
@@ -71,28 +75,32 @@ XML_REPORT_TEMPLATE = """<?xml version="
</testsuites>
"""
-CSV_REPORT_TEMPLATE = \
-"""Program;CC;CC_Time;CC_Hash;Exec;Exec_Time;Score
+CSV_REPORT_TEMPLATE = """\
+Program;CC;CC_Time;CC_Hash;Exec;Exec_Time;Score
{%- for suite in suites -%}
{%- for test in suite.tests %}
{{ suite.name }}/{{ test.path }}/{{ test.name }};
{%- if test.code == "NOEXE" -%}
fail;*;*;
{%- else -%}
- pass;{{ test.metrics.compile_time if test.metrics }};{{ test.metrics.hash if test.metrics }};
+ pass;{{ test.metrics.compile_time if test.metrics }};\
+{{ test.metrics.hash if test.metrics }};
{%- endif -%}
{%- if test.code == "FAIL" or test.code == "NOEXE" -%}
fail;*;*;
{%- else -%}
- pass;{{ test.metrics.exec_time if test.metrics }};{{ test.metrics.score if test.metrics }};
+ pass;{{ test.metrics.exec_time if test.metrics }};\
+{{ test.metrics.score if test.metrics }};
{%- endif -%}
{% endfor %}
{%- endfor -%}
"""
-# _importProfile imports a single profile. It must be at the top level (and
-# not within TestSuiteTest) so that multiprocessing can import it correctly.
+
def _importProfile(name_filename):
+ """_importProfile imports a single profile. It must be at the top level
+ (and not within TestSuiteTest) so that multiprocessing can import it
+ correctly."""
name, filename = name_filename
if not os.path.exists(filename):
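Two backslash tricks in the CSV template rewrite above are easy to miss: the `\` right after the opening `"""` suppresses the leading newline of the triple-quoted string, and the `\` at the end of the split Jinja lines splices the wrapped source back into a single rendered CSV row. A minimal sketch of the first trick:

    # The backslash after the opening quotes swallows the first newline.
    s_without = """\
    Program;CC;Exec_Time"""
    s_with = """
    Program;CC;Exec_Time"""
    assert s_without.startswith('Program')
    assert s_with.startswith('\n')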
@@ -141,7 +149,8 @@ def _lit_json_to_template(json_reports,
entry = {'name': suite,
'id': id,
'tests': tests,
- 'timestamp': datetime.datetime.now().replace(microsecond=0).isoformat(),
+ 'timestamp': datetime.datetime.now().replace(microsecond=0)
+ .isoformat(),
'num_tests': len(tests),
'num_failures': len(
[x for x in tests if x['code'] == 'FAIL']),
@@ -249,8 +258,8 @@ class TestSuiteTest(BuiltinTest):
" test or directory name)")
if opts.single_result and not opts.only_test[1]:
- self._fatal("--single-result must be given a single test name, not a " +
- "directory name")
+ self._fatal("--single-result must be given a single test name, "
+ "not a directory name")
opts.cppflags = ' '.join(opts.cppflags)
opts.cflags = ' '.join(opts.cflags)
@@ -302,7 +311,8 @@ class TestSuiteTest(BuiltinTest):
# Construct the nickname from a few key parameters.
cc_info = self._get_cc_info(cmake_vars)
cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
- opts.label += "__%s__%s" % (cc_nick, cc_info['cc_target'].split('-')[0])
+ opts.label += "__%s__%s" %\
+ (cc_nick, cc_info['cc_target'].split('-')[0])
logger.info('Using nickname: %r' % opts.label)
# When we can't detect the clang version we use 0 instead. That
@@ -432,7 +442,8 @@ class TestSuiteTest(BuiltinTest):
if self.opts.cxx:
defs['CMAKE_CXX_COMPILER'] = self.opts.cxx
- cmake_build_types = ('DEBUG','MINSIZEREL', 'RELEASE', 'RELWITHDEBINFO')
+ cmake_build_types = ('DEBUG', 'MINSIZEREL', 'RELEASE',
+ 'RELWITHDEBINFO')
if self.opts.cppflags or self.opts.cflags:
all_cflags = ' '.join([self.opts.cppflags, self.opts.cflags])
defs['CMAKE_C_FLAGS'] = self._unix_quote_args(all_cflags)
@@ -450,7 +461,8 @@ class TestSuiteTest(BuiltinTest):
defs['CMAKE_CXX_FLAGS_'+build_type] = ""
if self.opts.run_under:
- defs['TEST_SUITE_RUN_UNDER'] = self._unix_quote_args(self.opts.run_under)
+ defs['TEST_SUITE_RUN_UNDER'] = \
+ self._unix_quote_args(self.opts.run_under)
if self.opts.benchmarking_only:
defs['TEST_SUITE_BENCHMARKING_ONLY'] = 'ON'
if self.opts.only_compile:
@@ -517,10 +529,10 @@ class TestSuiteTest(BuiltinTest):
if value is not None:
early_defs[key] = value
- cmake_cmd = [cmake_cmd] + \
- ['-D%s=%s' % (k, v) for k, v in early_defs.items()] + \
- cmake_flags + [self._test_suite_dir()] + \
- ['-D%s=%s' % (k, v) for k, v in defs.items()]
+ cmake_cmd = ([cmake_cmd] +
+ ['-D%s=%s' % (k, v) for k, v in early_defs.items()] +
+ cmake_flags + [self._test_suite_dir()] +
+ ['-D%s=%s' % (k, v) for k, v in defs.items()])
if execute:
self._check_call(cmake_cmd, cwd=path)
@@ -582,13 +594,14 @@ class TestSuiteTest(BuiltinTest):
if nr_threads != 1:
logger.warning('Gathering profiles with perf requires -j 1 ' +
'as perf record cannot be run multiple times ' +
- 'simultaneously. Overriding -j %s to -j 1' % \
+ 'simultaneously. Overriding -j %s to -j 1' %
nr_threads)
nr_threads = 1
extra_args += ['--param', 'profile=perf']
if self.opts.perf_events:
extra_args += ['--param',
- 'perf_profile_events=%s' % self.opts.perf_events]
+ 'perf_profile_events=%s' %
+ self.opts.perf_events]
logger.info('Testing...')
try:
@@ -604,20 +617,21 @@ class TestSuiteTest(BuiltinTest):
try:
return json.loads(open(output_json_path.name).read())
except ValueError as e:
- fatal("Running test-suite did not create valid json report in {}: {}".format(
- output_json_path.name, e.message))
+ fatal("Running test-suite did not create valid json report "
+ "in {}: {}".format(output_json_path.name, e.message))
def _is_pass_code(self, code):
return code in ('PASS', 'XPASS', 'XFAIL')
def _get_lnt_code(self, code):
- return {'PASS': lnt.testing.PASS,
- 'FAIL': lnt.testing.FAIL,
- 'NOEXE': lnt.testing.FAIL,
- 'XFAIL': lnt.testing.XFAIL,
- 'XPASS': lnt.testing.FAIL,
- 'UNRESOLVED': lnt.testing.FAIL
- }[code]
+ return {
+ 'FAIL': lnt.testing.FAIL,
+ 'NOEXE': lnt.testing.FAIL,
+ 'PASS': lnt.testing.PASS,
+ 'UNRESOLVED': lnt.testing.FAIL,
+ 'XFAIL': lnt.testing.XFAIL,
+ 'XPASS': lnt.testing.FAIL,
+ }[code]
def _extract_cmake_vars_from_cache(self):
assert self.configured is True
@@ -711,7 +725,8 @@ class TestSuiteTest(BuiltinTest):
name = name[:-5]
name = 'nts.' + name
- # If --single-result is given, exit based on --single-result-predicate
+ # If --single-result is given, exit based on
+ # --single-result-predicate
is_pass = self._is_pass_code(code)
if self.opts.single_result and \
raw_name == self.opts.single_result + '.test':
@@ -730,12 +745,14 @@ class TestSuiteTest(BuiltinTest):
profiles_to_import.append((name, v))
continue
- if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[k] in ignore:
+ if k not in LIT_METRIC_TO_LNT or \
+ LIT_METRIC_TO_LNT[k] in ignore:
continue
server_name = name + '.' + LIT_METRIC_TO_LNT[k]
if k == 'link_time':
- # Move link time into a second benchmark's compile-time.
+ # Move link time into a second benchmark's
+ # compile-time.
server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]
test_samples.append(
@@ -752,10 +769,10 @@ class TestSuiteTest(BuiltinTest):
no_errors = False
elif not is_pass:
+ lnt_code = self._get_lnt_code(test_data['code'])
test_samples.append(
lnt.testing.TestSamples(name + '.exec.status',
- [self._get_lnt_code(test_data['code'])],
- test_info))
+ [lnt_code], test_info))
no_errors = False
# Now import the profiles in parallel.
@@ -873,7 +890,8 @@ class TestSuiteTest(BuiltinTest):
f.write(std_out)
# Executable(s) and test file:
shutil.copy(os.path.join(local_path, short_name), report_path)
- shutil.copy(os.path.join(local_path, short_name + ".test"), report_path)
+ shutil.copy(os.path.join(local_path, short_name + ".test"),
+ report_path)
# Temp files are in:
temp_files = os.path.join(local_path, "CMakeFiles",
short_name + ".dir")
@@ -956,7 +974,8 @@ class TestSuiteTest(BuiltinTest):
logger.warning("Tests may fail because of iprofiler's output.")
# The dtps file will be saved as root, make it so
# that we can read it.
- chmod = sudo + ["chown", "-R", getpass.getuser(), short_name + ".dtps"]
+ chmod = sudo + ["chown", "-R", getpass.getuser(),
+ short_name + ".dtps"]
subprocess.call(chmod)
profile = local_path + "/" + short_name + ".dtps"
shutil.copytree(profile, report_path + "/" + short_name + ".dtps")
@@ -968,6 +987,7 @@ class TestSuiteTest(BuiltinTest):
return lnt.util.ImportData.no_submit()
+
@click.command("test-suite", short_help=__doc__)
@click.argument("label", default=platform.uname()[1], required=False,
type=click.UNPROCESSED)
Modified: lnt/trunk/lnt/util/ImportData.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/ImportData.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/util/ImportData.py (original)
+++ lnt/trunk/lnt/util/ImportData.py Wed Nov 8 16:27:12 2017
@@ -58,9 +58,7 @@ def import_and_report(config, db_name, d
startTime = time.time()
try:
data = lnt.formats.read_any(file, format)
- except KeyboardInterrupt:
- raise
- except:
+ except Exception:
import traceback
result['error'] = "could not parse input format"
result['message'] = traceback.format_exc()
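This hunk shows why the guard could go: with a bare `except:`, the explicit `except KeyboardInterrupt: raise` was needed so a Ctrl-C would not be reported as a parse failure; `except Exception` never catches `KeyboardInterrupt` in the first place. A sketch of the new shape, with a hypothetical `read_any` stand-in:

    import traceback

    def read_any(data, fmt):
        # Hypothetical stand-in that always fails to parse.
        raise ValueError('bad %s input' % fmt)

    result = {}
    try:
        read_any('garbage', 'json')
    except Exception:
        # A real Ctrl-C (KeyboardInterrupt) would propagate past this
        # handler untouched, so no explicit re-raise is required.
        result['error'] = 'could not parse input format'
        result['message'] = traceback.format_exc()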
Modified: lnt/trunk/lnt/util/ServerUtil.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/ServerUtil.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/util/ServerUtil.py (original)
+++ lnt/trunk/lnt/util/ServerUtil.py Wed Nov 8 16:27:12 2017
@@ -55,7 +55,7 @@ def submitFileToServer(url, file, select
# The result is expected to be a JSON object.
try:
return json.loads(result_data)
- except:
+ except Exception:
import traceback
print "Unable to load result, not a valid JSON object."
print
Modified: lnt/trunk/lnt/util/async_ops.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/async_ops.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/util/async_ops.py (original)
+++ lnt/trunk/lnt/util/async_ops.py Wed Nov 8 16:27:12 2017
@@ -141,7 +141,7 @@ def async_wrapper(job, ts_args, func_arg
logger.info(msg)
else:
logger.warning(msg)
- except:
+ except Exception:
# Put all exception text into an exception and raise that for our
# parent process.
logger.error("Subprocess failed with:" +
Modified: lnt/trunk/lnt/util/stats.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/stats.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/util/stats.py (original)
+++ lnt/trunk/lnt/util/stats.py Wed Nov 8 16:27:12 2017
@@ -3,27 +3,27 @@ import math
from lnt.external.stats.stats import mannwhitneyu as mannwhitneyu_large
-def safe_min(l):
+def safe_min(values):
"""Calculate min, but if given an empty list return None."""
- l = list(l) # In case this is a complex type, get a simple list.
- if not l:
+ values = list(values) # In case this is a complex type, get a simple list.
+ if not values:
return None
else:
- return min(l)
+ return min(values)
-def safe_max(l):
+def safe_max(values):
"""Calculate max, but if given an empty list return None."""
- l = list(l) # In case this is a complex type, get a simple list.
- if not l:
+ values = list(values) # In case this is a complex type, get a simple list.
+ if not values:
return None
else:
- return max(l)
+ return max(values)
-def mean(l):
- if l:
- return sum(l)/len(l)
+def mean(values):
+ if values:
+ return sum(values)/len(values)
else:
return None
@@ -51,24 +51,24 @@ def agg_mean(pairs):
return (None, None)
-def median(l):
- if not l:
+def median(values):
+ if not values:
return None
- l = list(l)
- l.sort()
- N = len(l)
- return (l[(N-1)//2] + l[N//2])*.5
+ values = list(values)
+ values.sort()
+ N = len(values)
+ return (values[(N-1)//2] + values[N//2])*.5
-def median_absolute_deviation(l, med=None):
+def median_absolute_deviation(values, med=None):
if med is None:
- med = median(l)
- return median([abs(x - med) for x in l])
+ med = median(values)
+ return median([abs(x - med) for x in values])
-def standard_deviation(l):
- m = mean(l)
- means_sqrd = sum([(v - m)**2 for v in l]) / len(l)
+def standard_deviation(values):
+ m = mean(values)
+ means_sqrd = sum([(v - m)**2 for v in values]) / len(values)
rms = math.sqrt(means_sqrd)
return rms
@@ -120,6 +120,7 @@ def mannwhitneyu_small(a, b, sigLevel):
same = U <= SIGN_TABLES[sigLevel][len(a) - 1][len(b) - 1]
return same
+
# Table for .10 significance level.
TABLE_0_10 = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
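The wholesale `l` to `values` rename above (like `l` to `log` in compile.py earlier) clears E741, which flags `l`, `O` and `I` as ambiguous names because they read like `1` and `0` in many fonts. The renamed helper, restated with a quick usage check:

    def safe_min(values):
        """Calculate min, but if given an empty list return None."""
        values = list(values)  # materialize generators/iterables first
        if not values:
            return None
        return min(values)

    assert safe_min([]) is None
    assert safe_min(x * x for x in (3, -1, 2)) == 1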
Modified: lnt/trunk/lnt/util/wsgi_restart.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/wsgi_restart.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/lnt/util/wsgi_restart.py (original)
+++ lnt/trunk/lnt/util/wsgi_restart.py Wed Nov 8 16:27:12 2017
@@ -48,7 +48,7 @@ def _modified(path):
if mtime != _times[path]:
return True
- except:
+ except Exception:
# If any exception occured, likely that file has been
# been removed just before stat(), so force a restart.
@@ -84,9 +84,10 @@ def _monitor():
try:
return _queue.get(timeout=_interval)
- except:
+ except Exception:
pass
+
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
@@ -94,10 +95,11 @@ _thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
- except:
+ except Exception:
pass
_thread.join()
+
atexit.register(_exiting)
Modified: lnt/trunk/setup.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/setup.py?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/setup.py (original)
+++ lnt/trunk/setup.py Wed Nov 8 16:27:12 2017
@@ -12,8 +12,8 @@ if _platform == "darwin":
os.environ["CXX"] = "xcrun --sdk macosx clang"
cflags += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
-# setuptools expects to be invoked from within the directory of setup.py, but it
-# is nice to allow:
+# setuptools expects to be invoked from within the directory of setup.py, but
+# it is nice to allow:
# python path/to/setup.py install
# to work (for scripts, etc.)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
@@ -37,27 +37,27 @@ except TypeError:
reqs = [str(ir.req) for ir in install_reqs]
setup(
- name = "LNT",
- version = lnt.__version__,
+ name="LNT",
+ version=lnt.__version__,
- author = lnt.__author__,
- author_email = lnt.__email__,
- url = 'http://llvm.org',
- license = 'BSD',
-
- description = "LLVM Nightly Test Infrastructure",
- keywords = 'web testing performance development llvm',
- long_description = """\
+ author=lnt.__author__,
+ author_email=lnt.__email__,
+ url='http://llvm.org',
+ license='BSD',
+
+ description="LLVM Nightly Test Infrastructure",
+ keywords='web testing performance development llvm',
+ long_description="""\
*LNT*
+++++
About
=====
-*LNT* is an infrastructure for performance testing. The software itself consists
-of two main parts, a web application for accessing and visualizing performance
-data, and command line utilities to allow users to generate and submit test
-results to the server.
+*LNT* is an infrastructure for performance testing. The software itself
+consists of two main parts, a web application for accessing and visualizing
+performance data, and command line utilities to allow users to generate and
+submit test results to the server.
The package was originally written for use in testing LLVM compiler
technologies, but is designed to be usable for the performance testing of any
@@ -91,34 +91,35 @@ http://llvm.org/svn/llvm-project/lnt/tru
'Topic :: Software Development :: Testing',
],
- zip_safe = False,
+ zip_safe=False,
# Additional resource extensions we use.
- package_data = {'lnt.server.ui': ['static/*.ico',
- 'static/*.js',
- 'static/*.css',
- 'static/*.svg',
- 'static/bootstrap/css/*.css',
- 'static/bootstrap/js/*.js',
- 'static/bootstrap/img/*.png',
- 'static/flot/*.min.js',
- 'static/d3/*.min.js',
- 'static/jquery/**/*.min.js',
- 'templates/*.html',
- 'templates/reporting/*.html',
- 'templates/reporting/*.txt'],
- 'lnt.server.db': ['migrations/*.py'] },
+ package_data={'lnt.server.ui': ['static/*.ico',
+ 'static/*.js',
+ 'static/*.css',
+ 'static/*.svg',
+ 'static/bootstrap/css/*.css',
+ 'static/bootstrap/js/*.js',
+ 'static/bootstrap/img/*.png',
+ 'static/flot/*.min.js',
+ 'static/d3/*.min.js',
+ 'static/jquery/**/*.min.js',
+ 'templates/*.html',
+ 'templates/reporting/*.html',
+ 'templates/reporting/*.txt'],
+ 'lnt.server.db': ['migrations/*.py'],
+ },
- packages = find_packages(),
+ packages=find_packages(),
- test_suite = 'tests.test_all',
+ test_suite='tests.test_all',
- entry_points = {
+ entry_points={
'console_scripts': [
'lnt = lnt.lnttool:main',
- ],
- },
+ ],
+ },
install_requires=reqs,
- ext_modules = [cPerf],
+ ext_modules=[cPerf],
)
Modified: lnt/trunk/tests/lnttool/submit.shtest
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/submit.shtest?rev=317746&r1=317745&r2=317746&view=diff
==============================================================================
--- lnt/trunk/tests/lnttool/submit.shtest (original)
+++ lnt/trunk/tests/lnttool/submit.shtest Wed Nov 8 16:27:12 2017
@@ -97,7 +97,7 @@ not lnt submit "http://localhost:9091/db
not lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/invalid_submission0.json" >> "${OUTPUT_DIR}/submit_errors.txt" 2>&1
# CHECK-ERRORS: error: lnt server: could not parse input format
# ...
-# CHECK-ERRORS: SystemExit: unable to guess input format for
+# CHECK-ERRORS: ValueError: unable to guess input format for
not lnt submit "http://localhost:9091/db_default/v4/compile/submitRun" "${INPUTS}/invalid_submission1.json" >> "${OUTPUT_DIR}/submit_errors.txt" 2>&1
# CHECK-ERRORS: error: lnt server: import failure: machine
# ...
Added: lnt/trunk/utils/lint.sh
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/utils/lint.sh?rev=317746&view=auto
==============================================================================
--- lnt/trunk/utils/lint.sh (added)
+++ lnt/trunk/utils/lint.sh Wed Nov 8 16:27:12 2017
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+if [ ! -e ./setup.py ]; then
+ echo 1>&2 "Should start this script from the toplevel lnt directory"
+ exit 1
+fi
+pycodestyle --exclude='lnt/external/stats/,docs/conf.py,tests/' .
Propchange: lnt/trunk/utils/lint.sh
------------------------------------------------------------------------------
svn:executable = *
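To reproduce this check locally, run `./utils/lint.sh` (the executable property above makes that work) from the top-level LNT directory; the script refuses to run elsewhere, per the `./setup.py` test, and assumes a `pycodestyle` binary is on PATH, e.g. from `pip install pycodestyle`.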