[LNT] r308790 - lnt/server/ui: pep8 fixes; NFC

Matthias Braun via llvm-commits <llvm-commits@lists.llvm.org>
Fri Jul 21 14:41:39 PDT 2017


Author: matze
Date: Fri Jul 21 14:41:39 2017
New Revision: 308790

URL: http://llvm.org/viewvc/llvm-project?rev=308790&view=rev
Log:
lnt/server/ui: pep8 fixes; NFC

Modified:
    lnt/trunk/lnt/lnttool/import_data.py
    lnt/trunk/lnt/server/db/testsuitedb.py
    lnt/trunk/lnt/server/instance.py
    lnt/trunk/lnt/server/ui/api.py
    lnt/trunk/lnt/server/ui/app.py
    lnt/trunk/lnt/server/ui/decorators.py
    lnt/trunk/lnt/server/ui/filters.py
    lnt/trunk/lnt/server/ui/globals.py
    lnt/trunk/lnt/server/ui/profile_views.py
    lnt/trunk/lnt/server/ui/regression_views.py
    lnt/trunk/lnt/server/ui/util.py
    lnt/trunk/lnt/server/ui/views.py

Modified: lnt/trunk/lnt/lnttool/import_data.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/import_data.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/import_data.py (original)
+++ lnt/trunk/lnt/lnttool/import_data.py Fri Jul 21 14:41:39 2017
@@ -1,6 +1,7 @@
 import click
 import lnt.formats
 
+
 @click.command("import")
 @click.argument("instance_path", type=click.UNPROCESSED)
 @click.argument("files", nargs=-1, type=click.Path(exists=True), required=True)

Modified: lnt/trunk/lnt/server/db/testsuitedb.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/db/testsuitedb.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/db/testsuitedb.py (original)
+++ lnt/trunk/lnt/server/db/testsuitedb.py Fri Jul 21 14:41:39 2017
@@ -368,8 +368,8 @@ class TestSuiteDB(object):
                 }
                 # Leave out: machine_id, simple_run_id, imported_from
                 if flatten_order:
-                    _dict_update_abort_on_duplicates(result,
-                        self.order.__json__(include_id=False))
+                    _dict_update_abort_on_duplicates(
+                        result, self.order.__json__(include_id=False))
                     result['order_by'] = \
                         ','.join([f.name for f in self.order.fields])
                     result['order_id'] = self.order_id
@@ -541,8 +541,8 @@ class TestSuiteDB(object):
                 # Leave out: run_id
                 # TODO: What about profile/profile_id?
                 if flatten_test:
-                    _dict_update_abort_on_duplicates(result,
-                        self.test.__json__(include_id=False))
+                    _dict_update_abort_on_duplicates(
+                        result, self.test.__json__(include_id=False))
                 else:
                     result['test_id'] = self.test_id
                 _dict_update_abort_on_duplicates(result, self.get_fields())
@@ -906,13 +906,13 @@ class TestSuiteDB(object):
             end_time = aniso8601.parse_datetime(run_data['end_time'])
         except ValueError:
             end_time = datetime.datetime.strptime(run_data['end_time'],
-                                              "%Y-%m-%d %H:%M:%S")
+                                                  "%Y-%m-%d %H:%M:%S")
         run_parameters.pop('start_time')
         run_parameters.pop('end_time')
 
         # Convert the rundata into a run record. As with Machines, we construct
-        # the query to look for any existing run at the same time as we build up
-        # the record to possibly add.
+        # the query to look for any existing run at the same time as we build
+        # up the record to possibly add.
         #
         # FIXME: This feels inelegant, can't SA help us out here?
         query = self.query(self.Run).\

Modified: lnt/trunk/lnt/server/instance.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/instance.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/instance.py (original)
+++ lnt/trunk/lnt/server/instance.py Fri Jul 21 14:41:39 2017
@@ -8,6 +8,7 @@ import lnt.server.config
 from lnt.util import logger
 from lnt.testing.util.commands import fatal
 
+
 class Instance(object):
     """
     Wrapper object for representing an LNT instance.

Modified: lnt/trunk/lnt/server/ui/api.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/api.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/api.py (original)
+++ lnt/trunk/lnt/server/ui/api.py Fri Jul 21 14:41:39 2017
@@ -39,8 +39,10 @@ def requires_auth_token(f):
     @wraps(f)
     def decorated(*args, **kwargs):
         token = request.headers.get("AuthToken", None)
-        if not current_app.old_config.api_auth_token or token != current_app.old_config.api_auth_token:
-            abort(401, msg="Auth Token must be passed in AuthToken header, and included in LNT config.")
+        if not current_app.old_config.api_auth_token or \
+           token != current_app.old_config.api_auth_token:
+            abort(401, msg="Auth Token must be passed in AuthToken header, "
+                           "and included in LNT config.")
         return f(*args, **kwargs)
 
     return decorated
@@ -70,7 +72,9 @@ def with_ts(obj):
 
 def common_fields_factory():
     """Get a dict with all the common fields filled in."""
-    common_data = {'generated_by': 'LNT Server v{}'.format(current_app.version)}
+    common_data = {
+        'generated_by': 'LNT Server v{}'.format(current_app.version),
+    }
     return common_data
 
 
@@ -164,7 +168,6 @@ class Machine(Resource):
         stream = stream_with_context(perform_delete(ts, machine))
         return Response(stream, mimetype="text/plain")
 
-
     @staticmethod
     @requires_auth_token
     def post(machine_id):
@@ -267,8 +270,8 @@ class Runs(Resource):
         """Add a new run into the lnt database"""
         db = request.get_db()
         data = request.data
-        result = lnt.util.ImportData.import_from_string(current_app.old_config,
-            g.db_name, db, g.testsuite_name, data)
+        result = lnt.util.ImportData.import_from_string(
+            current_app.old_config, g.db_name, db, g.testsuite_name, data)
 
         new_url = ('%sapi/db_%s/v4/%s/runs/%s' %
                    (request.url_root, g.db_name, g.testsuite_name,
@@ -325,8 +328,8 @@ class SamplesData(Resource):
         run_ids = [int(r) for r in args.get('runid', [])]
 
         if not run_ids:
-            abort(400,
-                  msg='No runids found in args. Should be "samples?runid=1&runid=2" etc.')
+            abort(400, msg='No runids found in args. '
+                           'Should be "samples?runid=1&runid=2" etc.')
 
         to_get = [ts.Sample.id,
                   ts.Sample.run_id,
@@ -344,7 +347,8 @@ class SamplesData(Resource):
         result = common_fields_factory()
         # noinspection PyProtectedMember
         result['samples'] = [{k: v for k, v in sample.items() if v is not None}
-                             for sample in [sample._asdict() for sample in q.all()]]
+                             for sample in [sample._asdict()
+                                            for sample in q.all()]]
 
         return result
 
@@ -369,7 +373,8 @@ class Graph(Resource):
         except NoResultFound:
             abort(404)
 
-        q = ts.query(field.column, ts.Order.llvm_project_revision, ts.Run.start_time, ts.Run.id) \
+        q = ts.query(field.column, ts.Order.llvm_project_revision,
+                     ts.Run.start_time, ts.Run.id) \
             .join(ts.Run) \
             .join(ts.Order) \
             .filter(ts.Run.machine_id == machine.id) \
@@ -387,9 +392,11 @@ class Graph(Resource):
             if limit:
                 q = q.limit(limit)
 
-        samples = [[convert_revision(rev), val, {'label': rev, 'date': str(time), 'runID': str(rid)}] for
-                   val, rev, time, rid in
-                   q.all()[::-1]]
+        samples = [
+            [convert_revision(rev), val,
+             {'label': rev, 'date': str(time), 'runID': str(rid)}]
+            for val, rev, time, rid in q.all()[::-1]
+        ]
         samples.sort(key=lambda x: x[0])
         return samples
 
@@ -410,7 +417,9 @@ class Regression(Resource):
             .filter(ts.FieldChange.field_id == field.id) \
             .all()
         fc_ids = [x.id for x in fcs]
-        fc_mappings = dict([(x.id, (x.end_order.as_ordered_string(), x.new_value)) for x in fcs])
+        fc_mappings = dict(
+            [(x.id, (x.end_order.as_ordered_string(), x.new_value))
+             for x in fcs])
         if len(fcs) == 0:
             # If we don't find anything, lets see if we are even looking
             # for a valid thing to provide a nice error.
@@ -426,15 +435,21 @@ class Regression(Resource):
                 abort(404)
             # I think we found nothing.
             return []
-        regressions = ts.query(ts.Regression.title, ts.Regression.id, ts.RegressionIndicator.field_change_id,
+        regressions = ts.query(ts.Regression.title, ts.Regression.id,
+                               ts.RegressionIndicator.field_change_id,
                                ts.Regression.state) \
             .join(ts.RegressionIndicator) \
             .filter(ts.RegressionIndicator.field_change_id.in_(fc_ids)) \
             .all()
-        results = [{'title': r.title,
-                    'id': r.id,
-                    'state': r.state,
-                    'end_point': fc_mappings[r.field_change_id]} for r in regressions]
+        results = [
+            {
+                'title': r.title,
+                'id': r.id,
+                'state': r.state,
+                'end_point': fc_mappings[r.field_change_id]
+            }
+            for r in regressions
+        ]
         return results
 
 
@@ -461,5 +476,6 @@ def load_api_resources(api):
     api.add_resource(Order, ts_path("orders/<int:order_id>"))
     graph_url = "graph/<int:machine_id>/<int:test_id>/<int:field_index>"
     api.add_resource(Graph, ts_path(graph_url))
-    regression_url = "regression/<int:machine_id>/<int:test_id>/<int:field_index>"
+    regression_url = \
+        "regression/<int:machine_id>/<int:test_id>/<int:field_index>"
     api.add_resource(Regression, ts_path(regression_url))

Modified: lnt/trunk/lnt/server/ui/app.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/app.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/app.py (original)
+++ lnt/trunk/lnt/server/ui/app.py Fri Jul 21 14:41:39 2017
@@ -44,7 +44,8 @@ class RootSlashPatchMiddleware(object):
 
 
 class LNTObjectJSONEncoder(flask.json.JSONEncoder):
-    """Take SQLAlchemy objects and jsonify them. If the object has an __json__ method, use that instead."""
+    """Take SQLAlchemy objects and jsonify them. If the object has an __json__
+    method, use that instead."""
 
     def __init__(self,  *args, **kwargs):
         super(LNTObjectJSONEncoder, self).__init__(*args, **kwargs)
@@ -56,7 +57,8 @@ class LNTObjectJSONEncoder(flask.json.JS
             return obj.isoformat()
         if isinstance(obj.__class__, DeclarativeMeta):
             fields = {}
-            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
+            for field in [x for x in dir(obj)
+                          if not x.startswith('_') and x != 'metadata']:
                 data = obj.__getattribute__(field)
                 if isinstance(data, datetime.datetime):
                     fields[field] = data.isoformat()
@@ -88,16 +90,18 @@ class Request(flask.Request):
         """
         get_db() -> <db instance>
 
-        Get the active database and add a logging handler if part of the request
-        arguments.
+        Get the active database and add a logging handler if part of the
+        request arguments.
         """
 
         if self.db is None:
             echo = bool(self.args.get('db_log') or self.form.get('db_log'))
             try:
-                self.db = current_app.old_config.get_database(g.db_name, echo=echo)
+                self.db = current_app.old_config.get_database(g.db_name,
+                                                              echo=echo)
             except DatabaseError:
-                self.db = current_app.old_config.get_database(g.db_name, echo=echo)
+                self.db = current_app.old_config.get_database(g.db_name,
+                                                              echo=echo)
             # Enable SQL logging with db_log.
             #
             # FIXME: Conditionalize on an is_production variable.
@@ -174,7 +178,9 @@ class App(LNTExceptionLoggerFlask):
             message = "{}: {}".format(e.name, e.description)
             if request.accept_mimetypes.accept_json and \
                     not request.accept_mimetypes.accept_html:
-                response = jsonify({'error': 'The page you are looking for does not exist.'})
+                response = jsonify({
+                    'error': 'The page you are looking for does not exist.',
+                })
                 response.status_code = 404
                 return response
             return render_template('error.html', message=message), 404
@@ -183,7 +189,10 @@ class App(LNTExceptionLoggerFlask):
         def internal_server_error(e):
             if request.accept_mimetypes.accept_json and \
                     not request.accept_mimetypes.accept_html:
-                response = jsonify({'error': 'internal server error', 'message': repr(e)})
+                response = jsonify({
+                    'error': 'internal server error',
+                    'message': repr(e),
+                })
                 response.status_code = 500
                 return response
             return render_template('error.html', message=repr(e)), 500
@@ -235,7 +244,8 @@ class App(LNTExceptionLoggerFlask):
         self.logger.addHandler(ch)
 
         # Log to mem for the /log view.
-        h = logging.handlers.MemoryHandler(1024 * 1024, flushLevel=logging.CRITICAL)
+        h = logging.handlers.MemoryHandler(1024 * 1024,
+                                           flushLevel=logging.CRITICAL)
         h.setLevel(logging.DEBUG)
         self.logger.addHandler(h)
         # Also store the logger, so we can render the buffer in it.
@@ -253,7 +263,8 @@ class App(LNTExceptionLoggerFlask):
                 rotating.setLevel(logging.DEBUG)
                 self.logger.addHandler(rotating)
             except (OSError, IOError) as e:
-                print >> sys.stderr, "Error making log file", LOG_FILENAME, str(e)
+                print >> sys.stderr, "Error making log file", \
+                                     LOG_FILENAME, str(e)
                 print >> sys.stderr, "Will not log to file."
             else:
                 self.logger.info("Started file logging.")

Modified: lnt/trunk/lnt/server/ui/decorators.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/decorators.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/decorators.py (original)
+++ lnt/trunk/lnt/server/ui/decorators.py Fri Jul 21 14:41:39 2017
@@ -5,8 +5,9 @@ from flask import request
 
 frontend = flask.Module(__name__)
 
+
 # Decorator for implementing per-database routes.
-def db_route(rule, only_v3 = True, **options):
+def db_route(rule, only_v3=True, **options):
     """
     LNT specific route for endpoints which always refer to some database
     object.
@@ -15,7 +16,7 @@ def db_route(rule, only_v3 = True, **opt
     database, as well as initializing the global database information objects.
     """
     def decorator(f):
-        def wrap(db_name = None, **args):
+        def wrap(db_name=None, **args):
             # Initialize the database parameters on the app globals object.
             g.db_name = db_name or "default"
             g.db_info = current_app.old_config.databases.get(g.db_name)
@@ -31,7 +32,8 @@ UI support for database with version %r
             # Compute result.
             result = f(**args)
 
-            # Make sure that any transactions begun by this request are finished.
+            # Make sure that any transactions begun by this request are
+            # finished.
             request.get_db().rollback()
 
             # Return result.
@@ -44,6 +46,7 @@ UI support for database with version %r
         return wrap
     return decorator
 
+
 # Decorator for implementing per-testsuite routes.
 def v4_route(rule, **options):
     """
@@ -53,7 +56,7 @@ def v4_route(rule, **options):
 
     # FIXME: This is manually composed with db_route.
     def decorator(f):
-        def wrap(testsuite_name, db_name = None, **args):
+        def wrap(testsuite_name, db_name=None, **args):
             # Initialize the test suite parameters on the app globals object.
             g.testsuite_name = testsuite_name
 
@@ -66,7 +69,8 @@ def v4_route(rule, **options):
             # Compute result.
             result = f(**args)
 
-            # Make sure that any transactions begun by this request are finished.
+            # Make sure that any transactions begun by this request are
+            # finished.
             request.get_db().rollback()
 
             # Return result.

Modified: lnt/trunk/lnt/server/ui/filters.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/filters.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/filters.py (original)
+++ lnt/trunk/lnt/server/ui/filters.py Fri Jul 21 14:41:39 2017
@@ -17,7 +17,8 @@ def filter_asisotime(time):
     return ts.isoformat()
 
 
-def filter_aspctcell(value, class_=None, style=None, attributes=None, *args, **kwargs):
+def filter_aspctcell(value, class_=None, style=None, attributes=None, *args,
+                     **kwargs):
     cell = util.PctCell(value, *args, **kwargs)
     return cell.render(class_, style, attributes)
 

Modified: lnt/trunk/lnt/server/ui/globals.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/globals.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/globals.py (original)
+++ lnt/trunk/lnt/server/ui/globals.py Fri Jul 21 14:41:39 2017
@@ -20,7 +20,7 @@ def v4_url_for(*args, **kwargs):
     testsuite_name arguments.
     """
     return flask.url_for(*args, db_name=flask.g.db_name,
-                          testsuite_name=flask.g.testsuite_name, **kwargs)
+                         testsuite_name=flask.g.testsuite_name, **kwargs)
 
 
 def v4_url_available(*args, **kwargs):
@@ -45,5 +45,3 @@ def register(env):
         v4_url_for=v4_url_for,
         v4_url_available=v4_url_available,
         baseline_key=lnt.server.ui.util.baseline_key)
-
-

Modified: lnt/trunk/lnt/server/ui/profile_views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/profile_views.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/profile_views.py (original)
+++ lnt/trunk/lnt/server/ui/profile_views.py Fri Jul 21 14:41:39 2017
@@ -10,13 +10,16 @@ from flask import current_app
 from sqlalchemy.orm.exc import NoResultFound
 import flask
 import json
-import sys, os
+import sys
+import os
 
 from flask import render_template, current_app
-import os, json
+import os
+import json
 from lnt.server.ui.decorators import v4_route, frontend
 from lnt.server.ui.globals import v4_url_for
 
+
 @frontend.route('/profile/admin')
 def profile_admin():
     profileDir = current_app.old_config.profileDir
@@ -34,8 +37,8 @@ def profile_admin():
         age = []
 
     # Convert from UNIX timestamps to Javascript timestamps.
-    history = [[x * 1000, y] for x,y in history]
-    age = [[x * 1000, y] for x,y in age]
+    history = [[x * 1000, y] for x, y in history]
+    age = [[x * 1000, y] for x, y in age]
 
     # Calculate a histogram bucket size that shows ~20 bars on the screen
     num_buckets = 20
@@ -48,7 +51,7 @@ def profile_admin():
 
     # Construct the histogram.
     hist = {}
-    for x,y in age:
+    for x, y in age:
         z = int(float(x) / bucket_size)
         hist.setdefault(z, 0)
         hist[z] += y
@@ -57,6 +60,7 @@ def profile_admin():
     return render_template("profile_admin.html",
                            history=history, age=age, bucket_size=bucket_size)
 
+
 @v4_route("/profile/ajax/getFunctions")
 def v4_profile_ajax_getFunctions():
     ts = request.get_testsuite()
@@ -72,9 +76,10 @@ def v4_profile_ajax_getFunctions():
                .filter(ts.Sample.test_id == testid).first()
     if sample and sample.profile:
         p = sample.profile.load(profileDir)
-        return json.dumps([[n, f] for n,f in p.getFunctions().items()])
+        return json.dumps([[n, f] for n, f in p.getFunctions().items()])
     else:
-        abort(404);
+        abort(404)
+
 
 @v4_route("/profile/ajax/getTopLevelCounters")
 def v4_profile_ajax_getTopLevelCounters():
@@ -92,16 +97,17 @@ def v4_profile_ajax_getTopLevelCounters(
                    .filter(ts.Sample.test_id == testid).first()
         if sample and sample.profile:
             p = sample.profile.load(profileDir)
-            for k,v in p.getTopLevelCounters().items():
+            for k, v in p.getTopLevelCounters().items():
                 tlc.setdefault(k, [None]*len(runids))[idx] = v
         idx += 1
 
     # If the 1'th counter is None for all keys, truncate the list.
     if all(len(k) > 1 and k[1] is None for k in tlc.values()):
-        tlc = {k: [v[0]] for k,v in tlc.items()}
+        tlc = {k: [v[0]] for k, v in tlc.items()}
 
     return json.dumps(tlc)
 
+
 @v4_route("/profile/ajax/getCodeForFunction")
 def v4_profile_ajax_getCodeForFunction():
     ts = request.get_testsuite()
@@ -115,19 +121,22 @@ def v4_profile_ajax_getCodeForFunction()
                .filter(ts.Sample.run_id == runid) \
                .filter(ts.Sample.test_id == testid).first()
     if not sample or not sample.profile:
-        abort(404);
+        abort(404)
 
     p = sample.profile.load(profileDir)
     return json.dumps([x for x in p.getCodeForFunction(f)])
 
+
 @v4_route("/profile/<int:testid>/<int:run1_id>")
 def v4_profile_fwd(testid, run1_id):
     return v4_profile(testid, run1_id)
 
+
 @v4_route("/profile/<int:testid>/<int:run1_id>/<int:run2_id>")
 def v4_profile_fwd2(testid, run1_id, run2_id=None):
     return v4_profile(testid, run1_id, run2_id)
 
+
 def v4_profile(testid, run1_id, run2_id=None):
     ts = request.get_testsuite()
     profileDir = current_app.old_config.profileDir
@@ -177,21 +186,19 @@ def v4_profile(testid, run1_id, run2_id=
         json_run2 = {}
     urls = {
         'search': v4_url_for('v4_search'),
-        'singlerun_template': v4_url_for('v4_profile_fwd',
-                                          testid=1111,
-                                          run1_id=2222) \
-        .replace('1111', '<testid>').replace('2222', '<run1id>'),
-        'comparison_template': v4_url_for('v4_profile_fwd2',
-                                          testid=1111,
-                                          run1_id=2222,
-                                          run2_id=3333) \
-        .replace('1111', '<testid>').replace('2222', '<run1id>') \
-        .replace('3333', '<run2id>'),
-
-        'getTopLevelCounters': v4_url_for('v4_profile_ajax_getTopLevelCounters'),
+        'singlerun_template':
+            v4_url_for('v4_profile_fwd', testid=1111, run1_id=2222)
+            .replace('1111', '<testid>').replace('2222', '<run1id>'),
+        'comparison_template':
+            v4_url_for('v4_profile_fwd2', testid=1111, run1_id=2222,
+                       run2_id=3333)
+            .replace('1111', '<testid>').replace('2222', '<run1id>')
+            .replace('3333', '<run2id>'),
+        'getTopLevelCounters':
+            v4_url_for('v4_profile_ajax_getTopLevelCounters'),
         'getFunctions': v4_url_for('v4_profile_ajax_getFunctions'),
-        'getCodeForFunction': v4_url_for('v4_profile_ajax_getCodeForFunction'),
-
+        'getCodeForFunction':
+            v4_url_for('v4_profile_ajax_getCodeForFunction'),
     }
     return render_template("v4_profile.html",
                            ts=ts, test=test,

Modified: lnt/trunk/lnt/server/ui/regression_views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/regression_views.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/regression_views.py (original)
+++ lnt/trunk/lnt/server/ui/regression_views.py Fri Jul 21 14:41:39 2017
@@ -68,7 +68,7 @@ class PrecomputedCR():
 
     def get_value_status(self, ignore_small=True):
         return REGRESSED
-    
+
     def __json__(self):
         return self.__dict__
 
@@ -77,7 +77,8 @@ class PrecomputedCR():
 def v4_new_regressions():
     form = TriagePageSelectedForm(request.form)
     ts = request.get_testsuite()
-    if request.method == 'POST' and request.form['btn'] == "Create New Regression":
+    if request.method == 'POST' and \
+            request.form['btn'] == "Create New Regression":
         regression = new_regression(ts, form.field_changes.data)
         flash("Created " + regression.title, FLASH_SUCCESS)
         return redirect(v4_url_for("v4_regression_list",
@@ -98,9 +99,9 @@ def v4_new_regressions():
     recent_fieldchange = ts.query(ts.FieldChange) \
         .join(ts.Test) \
         .outerjoin(ts.ChangeIgnore) \
-        .filter(ts.ChangeIgnore.id == None) \
+        .filter(ts.ChangeIgnore.id.is_(None)) \
         .outerjoin(ts.RegressionIndicator) \
-        .filter(ts.RegressionIndicator.id == None) \
+        .filter(ts.RegressionIndicator.id.is_(None)) \
         .order_by(desc(ts.FieldChange.id)) \
         .limit(500) \
         .all()
@@ -111,7 +112,8 @@ def v4_new_regressions():
         if fc.old_value is None:
             cr, key_run, _ = get_cr_for_field_change(ts, fc)
         else:
-            cr = PrecomputedCR(fc.old_value, fc.new_value, fc.field.bigger_is_better)
+            cr = PrecomputedCR(fc.old_value, fc.new_value,
+                               fc.field.bigger_is_better)
             key_run = get_first_runs_of_fieldchange(ts, fc)
         current_cr, _, _ = get_cr_for_field_change(ts, fc, current=True)
         crs.append(ChangeData(fc, cr, key_run, current_cr))
@@ -125,20 +127,22 @@ def v4_new_regressions():
 def calc_impact(ts, fcs):
     crs = []
     for fc in fcs:
-        if fc == None:
+        if fc is None:
             continue
         if fc.old_value is None:
             cr, _, _ = get_cr_for_field_change(ts, fc)
         else:
-            cr = PrecomputedCR(fc.old_value, fc.new_value, fc.field.bigger_is_better)
+            cr = PrecomputedCR(fc.old_value, fc.new_value,
+                               fc.field.bigger_is_better)
         crs.append(cr)
     if crs:
         olds = sum([x.previous for x in crs if x.previous])
         news = sum([x.current for x in crs if x.current])
         if olds and news:
-            new_cr = PrecomputedCR(olds, news, crs[0].bigger_is_better)  # TODO both directions
+            new_cr = PrecomputedCR(olds, news, crs[0].bigger_is_better)
+            # TODO both directions
             return new_cr
-    
+
     return PrecomputedCR(1, 1, True)
 
 
@@ -172,7 +176,7 @@ def v4_regression_list():
             if r.bug:
                 target = i
                 links.append(r.bug)
-                
+
         new_regress = new_regression(ts, [x.field_change_id for x in reg_inds])
         new_regress.state = regressions[target].state
         new_regress.title = regressions[target].title
@@ -182,7 +186,7 @@ def v4_regression_list():
             r.title = "Merged into Regression " + str(new_regress.id)
             r.state = RegressionState.IGNORED
         [ts.delete(x) for x in reg_inds]
-        
+
         ts.commit()
         flash("Created: " + new_regress.title, FLASH_SUCCESS)
         return redirect(v4_url_for("v4_regression_detail", id=new_regress.id))
@@ -218,7 +222,8 @@ def v4_regression_list():
                     regression.id) \
             .all()
         if machine_filter:
-            machine_names = set([x.field_change.machine.name for x in reg_inds])
+            machine_names = \
+                set([x.field_change.machine.name for x in reg_inds])
             if machine_filter in machine_names:
                 filtered_regressions.append(regression)
             else:
@@ -230,10 +235,11 @@ def v4_regression_list():
         regression_sizes.append(len(reg_inds))
         impacts.append(calc_impact(ts, [x.field_change for x in reg_inds]))
         # Now guess the regression age:
-        if len(reg_inds) and reg_inds[0].field_change and reg_inds[0].field_change.run:
-                age = reg_inds[0].field_change.run.end_time
+        if len(reg_inds) and reg_inds[0].field_change and \
+                reg_inds[0].field_change.run:
+            age = reg_inds[0].field_change.run.end_time
         else:
-                age = EmptyDate()
+            age = EmptyDate()
         ages.append(age)
 
     return render_template("v4_regression_list.html",
@@ -256,7 +262,7 @@ def _get_regressions_from_selected_form(
         .filter(ts.Regression.id.in_(regressions_id_to_merge)).all()
     reg_inds = ts.query(ts.RegressionIndicator) \
         .filter(ts.RegressionIndicator.regression_id.in_(
-        regressions_id_to_merge)) \
+            regressions_id_to_merge)) \
         .all()
     return reg_inds, regressions
 
@@ -309,10 +315,12 @@ def v4_regression_detail(id):
         return redirect(v4_url_for("v4_regression_list",
                         highlight=regression_info.id,
                         state=int(form.edit_state.data)))
-    if request.method == 'POST' and request.form['save_btn'] == "Split Regression":
+    if request.method == 'POST' and \
+            request.form['save_btn'] == "Split Regression":
         # For each of the regression indicators, grab their field ids.
         res_inds = ts.query(ts.RegressionIndicator) \
-            .filter(ts.RegressionIndicator.field_change_id.in_(form.field_changes.data)) \
+            .filter(ts.RegressionIndicator.field_change_id.in_(
+                form.field_changes.data)) \
             .all()
         fc_ids = [x.field_change_id for x in res_inds]
         second_regression = new_regression(ts, fc_ids)
@@ -331,7 +339,8 @@ def v4_regression_detail(id):
         # For each of the regression indicators, grab their field ids.
         title = regression_info.title
         res_inds = ts.query(ts.RegressionIndicator) \
-            .filter(ts.RegressionIndicator.regression_id == regression_info.id) \
+            .filter(
+                ts.RegressionIndicator.regression_id == regression_info.id) \
             .all()
         # Now remove our links to this regression.
         for res_ind in res_inds:
@@ -367,7 +376,8 @@ def v4_regression_detail(id):
         if fc.old_value is None:
             cr, key_run, all_runs = get_cr_for_field_change(ts, fc)
         else:
-            cr = PrecomputedCR(fc.old_value, fc.new_value, fc.field.bigger_is_better)
+            cr = PrecomputedCR(fc.old_value, fc.new_value,
+                               fc.field.bigger_is_better)
             key_run = get_first_runs_of_fieldchange(ts, fc)
         current_cr, _, all_runs = get_cr_for_field_change(ts, fc, current=True)
         crs.append(ChangeData(fc, cr, key_run, current_cr))
@@ -376,7 +386,7 @@ def v4_regression_detail(id):
             ts_rev = key_run.parameters.get('test_suite_revision')
             if ts_rev and ts_rev != u'None':
                 test_suite_versions.add(ts_rev)
-    
+
     if len(test_suite_versions) > 1:
         revs = ', '.join(list(test_suite_versions))
         flash("More than one test-suite version: " + revs,
@@ -399,31 +409,32 @@ def v4_hook():
     ts = request.get_testsuite()
     rule_hooks.post_submission_hooks(ts, 0)
     abort(400)
-  
 
-@v4_route("/regressions/new_from_graph/<int:machine_id>/<int:test_id>/<int:field_index>/<int:run_id>", methods=["GET"])
+
+@v4_route("/regressions/new_from_graph/<int:machine_id>/<int:test_id>"
+          "/<int:field_index>/<int:run_id>", methods=["GET"])
 def v4_make_regression(machine_id, test_id, field_index, run_id):
     """This function is called to make a new regression from a graph data point.
-    
+
     It is not nessessarly the case that there will be a real change there,
     so we must create a regression, bypassing the normal analysis.
-    
+
     """
     ts = request.get_testsuite()
     field = ts.sample_fields[field_index]
     new_regression_id = 0
     run = ts.query(ts.Run).get(run_id)
-    
+
     runs = ts.query(ts.Run). \
         filter(ts.Run.order_id == run.order_id). \
         filter(ts.Run.machine_id == run.machine_id). \
         all()
-        
+
     if len(runs) == 0:
         abort(404)
-        
+
     previous_runs = ts.get_previous_runs_on_machine(run, 1)
-    
+
     # Find our start/end order.
     if previous_runs != []:
         start_order = previous_runs[0].order
@@ -436,11 +447,9 @@ def v4_make_regression(machine_id, test_
 
     runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
 
-    result = runinfo.get_comparison_result(runs,
-                                           previous_runs,
-                                           test_id,
-                                           field,
-                                           ts.Sample.get_hash_of_binary_field())
+    result = runinfo.get_comparison_result(
+        runs, previous_runs, test_id, field,
+        ts.Sample.get_hash_of_binary_field())
 
     # Try and find a matching FC and update, else create one.
     f = None
@@ -455,7 +464,7 @@ def v4_make_regression(machine_id, test_
             .one()
     except sqlalchemy.orm.exc.NoResultFound:
             f = None
-    
+
     if not f:
         test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
         f = ts.FieldChange(start_order=start_order,
@@ -470,11 +479,11 @@ def v4_make_regression(machine_id, test_
         f.new_value = result.current
         f.run = run
     ts.commit()
-    
+
     # Make new regressions.
     regression = new_regression(ts, [f.id])
     regression.state = RegressionState.ACTIVE
-    
+
     ts.commit()
     logger.info("Manually created new regressions: {}".format(regression.id))
     flash("Created " + regression.title, FLASH_SUCCESS)

Modified: lnt/trunk/lnt/server/ui/util.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/util.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/util.py (original)
+++ lnt/trunk/lnt/server/ui/util.py Fri Jul 21 14:41:39 2017
@@ -278,8 +278,8 @@ class PctCell:
                 attrs.append('%s="%s"' % (key, value))
         attr_string = ' '.join(attrs)
         if self.data:
-            return '<td %s>%s (%s)</td>' % (
-            attr_string, self.data, self.getValue())
+            return '<td %s>%s (%s)</td>' % \
+                (attr_string, self.data, self.getValue())
         else:
             return '<td %s>%s</td>' % (attr_string, self.getValue())
 
@@ -298,9 +298,12 @@ def renderProducerAsHTML(producer):
         builder = m.group(2)
         build = m.group(3)
 
-        png_url = 'http://%(url)s/png?builder=%(builder)s&number=%(build)s' % locals()
+        png_url = \
+            'http://%(url)s/png?builder=%(builder)s&number=%(build)s' % \
+            locals()
         img = '<img src="%(png_url)s" />' % locals()
-        return '<a href="%(producer)s">%(builder)s #%(build)s %(img)s</a>' % locals()
+        return '<a href="%(producer)s">%(builder)s #%(build)s %(img)s</a>' % \
+            locals()
 
     elif producer.startswith('http://'):
         return '<a href="' + producer + '">Producer</a>'
@@ -332,9 +335,10 @@ def guess_test_short_name(test_name):
 
 def baseline_key(ts_name=None):
     """A unique name for baseline session keys per DB and suite.
-    
-    Optionally, get the test-suite name from a parameter, when this is called during
-    submission the global context does not know which test-suite we are in until too late.
+
+    Optionally, get the test-suite name from a parameter, when this is called
+    during submission the global context does not know which test-suite we are
+    in until too late.
     """
     if ts_name:
         name = ts_name
@@ -353,4 +357,3 @@ def convert_revision(dotted):
     """
     dotted = integral_rex.findall(dotted)
     return tuple([int(d) for d in dotted])
-

Modified: lnt/trunk/lnt/server/ui/views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/views.py?rev=308790&r1=308789&r2=308790&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/views.py (original)
+++ lnt/trunk/lnt/server/ui/views.py Fri Jul 21 14:41:39 2017
@@ -61,10 +61,12 @@ def get_redirect_target():
 ###
 # Root-Only Routes
 
+
 @frontend.route('/favicon.ico')
 def favicon_ico():
     return redirect(url_for('.static', filename='favicon.ico'))
 
+
 @frontend.route('/select_db')
 def select_db():
     path = request.args.get('path')
@@ -86,7 +88,8 @@ def select_db():
 #####
 # Per-Database Routes
 
-@db_route('/', only_v3 = False)
+
+@db_route('/', only_v3=False)
 def index():
     return render_template("index.html")
 
@@ -113,14 +116,14 @@ def _do_submit():
             "submit_run.html", error="cannot provide input file *and* data")
 
     if input_file:
-        data_value = input_file.read()  
+        data_value = input_file.read()
     else:
         data_value = input_data
 
-    # The following accomodates old submitters. Note that we explicitely removed
-    # the tag field from the new submission format, this is only here for old
-    # submission jobs. The better way of doing it is mentioning the correct
-    # test-suite in the URL. So when submitting to suite YYYY use
+    # The following accomodates old submitters. Note that we explicitely
+    # removed the tag field from the new submission format, this is only here
+    # for old submission jobs. The better way of doing it is mentioning the
+    # correct test-suite in the URL. So when submitting to suite YYYY use
     # db_XXX/v4/YYYY/submitRun instead of db_XXXX/submitRun!
     if g.testsuite_name is None:
         try:
@@ -138,8 +141,9 @@ def _do_submit():
     # Get a DB connection.
     db = request.get_db()
 
-    result = lnt.util.ImportData.import_from_string(current_app.old_config,
-        g.db_name, db, g.testsuite_name, data_value, commit=commit)
+    result = lnt.util.ImportData.import_from_string(
+        current_app.old_config, g.db_name, db, g.testsuite_name, data_value,
+        commit=commit)
 
     # It is nice to have a full URL to the run, so fixup the request URL
     # here were we know more about the flask instance.
@@ -148,7 +152,7 @@ def _do_submit():
 
     response = flask.jsonify(**result)
     if result['error'] is not None:
-        response.status_code=400
+        response.status_code = 400
     return response
 
 
@@ -169,17 +173,19 @@ def submit_run_ts():
 ###
 # V4 Schema Viewer
 
+
 @v4_route("/")
 def v4_overview():
     return render_template("v4_overview.html",
                            testsuite_name=g.testsuite_name)
 
+
 @v4_route("/recent_activity")
 def v4_recent_activity():
     ts = request.get_testsuite()
 
-    # Get the most recent runs in this tag, we just arbitrarily limit to looking
-    # at the last 100 submission.
+    # Get the most recent runs in this tag, we just arbitrarily limit to
+    # looking at the last 100 submission.
     recent_runs = ts.query(ts.Run) \
         .join(ts.Order) \
         .join(ts.Machine) \
@@ -205,6 +211,7 @@ def v4_recent_activity():
                            active_submissions=active_submissions,
                            ts=ts)
 
+
 @v4_route("/machine/")
 def v4_machines():
     # Compute the list of associated runs, grouped by order.
@@ -243,7 +250,8 @@ def v4_machine_compare(machine_id):
         .order_by(ts.Run.start_time.desc()) \
         .first()
 
-    return redirect(v4_url_for('v4_run', id=machine_1_run.id, compare_to=machine_2_run.id))
+    return redirect(v4_url_for('v4_run', id=machine_1_run.id,
+                               compare_to=machine_2_run.id))
 
 
 @v4_route("/machine/<int:id>")
@@ -257,10 +265,10 @@ def v4_machine(id):
 
     associated_runs = util.multidict(
         (run_order, r)
-        for r,run_order in ts.query(ts.Run, ts.Order).\
-            join(ts.Order).\
-            filter(ts.Run.machine_id == id).\
-            order_by(ts.Run.start_time.desc()))
+        for r, run_order in (ts.query(ts.Run, ts.Order)
+                             .join(ts.Order)
+                             .filter(ts.Run.machine_id == id)
+                             .order_by(ts.Run.start_time.desc())))
     associated_runs = associated_runs.items()
     associated_runs.sort()
 
@@ -269,7 +277,9 @@ def v4_machine(id):
     if request.args.get('json'):
         json_obj = dict()
         try:
-            machine_obj = ts.query(ts.Machine).filter(ts.Machine.id == id).one()
+            machine_obj = ts.query(ts.Machine) \
+                .filter(ts.Machine.id == id) \
+                .one()
         except NoResultFound:
             abort(404)
         json_obj['name'] = machine_obj.name
@@ -279,7 +289,8 @@ def v4_machine(id):
             rev = order[0].llvm_project_revision
             for run in order[1]:
                 json_obj['runs'].append((run.id, rev,
-                                         run.start_time.isoformat(), run.end_time.isoformat()))
+                                         run.start_time.isoformat(),
+                                         run.end_time.isoformat()))
         return flask.jsonify(**json_obj)
     try:
         return render_template("v4_machine.html",
@@ -290,6 +301,7 @@ def v4_machine(id):
     except NoResultFound:
         abort(404)
 
+
 class V4RequestInfo(object):
     def __init__(self, run_id):
         self.db = request.get_db()
@@ -380,11 +392,13 @@ class V4RequestInfo(object):
         if note:
             flash(note, FLASH_INFO)
 
+
 @v4_route("/<int:id>/report")
 def v4_report(id):
     info = V4RequestInfo(id)
     return render_template('reporting/run_report.html', **info.data)
 
+
 @v4_route("/<int:id>/text_report")
 def v4_text_report(id):
     info = V4RequestInfo(id)
@@ -394,6 +408,7 @@ def v4_text_report(id):
     response.mimetype = "text/plain"
     return response
 
+
 # Compatilibity route for old run pages.
 @db_route("/simple/<tag>/<int:id>/", only_v3=False)
 def simple_run(tag, id):
@@ -435,12 +450,13 @@ def v4_run(id):
     options = {}
     options['show_delta'] = bool(request.args.get('show_delta'))
     options['show_previous'] = bool(request.args.get('show_previous'))
-    options['show_stddev'] =  bool(request.args.get('show_stddev'))
+    options['show_stddev'] = bool(request.args.get('show_stddev'))
     options['show_mad'] = bool(request.args.get('show_mad'))
     options['show_all'] = bool(request.args.get('show_all'))
     options['show_all_samples'] = bool(request.args.get('show_all_samples'))
-    options['show_sample_counts'] = bool(request.args.get('show_sample_counts'))
-    options['show_graphs'] = show_graphs = bool(request.args.get('show_graphs'))
+    options['show_sample_counts'] = \
+        bool(request.args.get('show_sample_counts'))
+    options['show_graphs'] = bool(request.args.get('show_graphs'))
     options['show_data_table'] = bool(request.args.get('show_data_table'))
     options['show_small_diff'] = bool(request.args.get('show_small_diff'))
     options['hide_report_by_default'] = bool(
@@ -545,7 +561,7 @@ def v4_order(id):
             ts.session.add(baseline)
             ts.session.commit()
 
-            flash("Baseline {} updated.".format(baseline.name), FLASH_SUCCESS )
+            flash("Baseline {} updated.".format(baseline.name), FLASH_SUCCESS)
         return redirect(v4_url_for("v4_order", id=id))
     else:
         print form.errors
@@ -593,6 +609,7 @@ def v4_all_orders():
 
     return render_template("v4_all_orders.html", ts=ts, orders=orders)
 
+
 @v4_route("/<int:id>/graph")
 def v4_run_graph(id):
     # This is an old style endpoint that treated graphs as associated with
@@ -604,9 +621,9 @@ def v4_run_graph(id):
         abort(404)
 
     # Convert the old style test parameters encoding.
-    args = { 'highlight_run' : id }
+    args = {'highlight_run': id}
     plot_number = 0
-    for name,value in request.args.items():
+    for name, value in request.args.items():
         # If this isn't a test specification, just forward it.
         if not name.startswith('test.'):
             args[name] = value
@@ -629,6 +646,7 @@ def v4_run_graph(id):
 BaselineLegendItem = namedtuple('BaselineLegendItem', 'name id')
 LegendItem = namedtuple('LegendItem', 'machine test_name field_name color url')
 
+
 @v4_route("/graph")
 def v4_graph():
     from lnt.server.ui import util
@@ -659,7 +677,8 @@ def v4_graph():
     options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
     show_lineplot = not options['hide_lineplot']
     options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
-    options['show_stddev'] = show_stddev = bool(request.args.get('show_stddev'))
+    options['show_stddev'] = show_stddev = \
+        bool(request.args.get('show_stddev'))
     options['hide_all_points'] = hide_all_points = bool(
         request.args.get('hide_all_points'))
     options['show_linear_regression'] = show_linear_regression = bool(
@@ -680,7 +699,7 @@ def v4_graph():
 
     # Load the graph parameters.
     graph_parameters = []
-    for name,value in request.args.items():
+    for name, value in request.args.items():
         # Plots to graph are passed as::
         #
         #  plot.<unused>=<machine id>.<test id>.<field index>
@@ -688,7 +707,7 @@ def v4_graph():
             continue
 
         # Ignore the extra part of the key, it is unused.
-        machine_id_str,test_id_str,field_index_str = value.split('.')
+        machine_id_str, test_id_str, field_index_str = value.split('.')
         try:
             machine_id = int(machine_id_str)
             test_id = int(test_id_str)
@@ -709,18 +728,18 @@ def v4_graph():
         graph_parameters.append((machine, test, field, field_index))
 
     # Order the plots by machine name, test name and then field.
-    graph_parameters.sort(key = lambda (m,t,f,_): (m.name, t.name, f.name, _))
+    graph_parameters.sort(key=lambda (m, t, f, _): (m.name, t.name, f.name, _))
 
     # Extract requested mean trend.
     mean_parameter = None
-    for name,value in request.args.items():
+    for name, value in request.args.items():
         # Mean to graph is passed as:
         #
         #  mean=<machine id>.<field index>
         if name != 'mean':
             continue
 
-        machine_id_str,field_index_str  = value.split('.')
+        machine_id_str, field_index_str = value.split('.')
         try:
             machine_id = int(machine_id_str)
             field_index = int(field_index_str)
@@ -745,7 +764,7 @@ def v4_graph():
 
     # Extract requested baselines, and their titles.
     baseline_parameters = []
-    for name,value in request.args.items():
+    for name, value in request.args.items():
         # Baselines to graph are passed as:
         #
         #  baseline.title=<run id>
@@ -761,9 +780,13 @@ def v4_graph():
             return abort(400)
 
         try:
-            run = ts.query(ts.Run).join(ts.Machine).filter(ts.Run.id == run_id).one()
+            run = ts.query(ts.Run) \
+                .join(ts.Machine) \
+                .filter(ts.Run.id == run_id) \
+                .one()
         except:
-            err_msg = "The run {} was not found in the database.".format(run_id)
+            err_msg = ("The run {} was not found in the database."
+                       .format(run_id))
             return render_template("error.html",
                                    message=err_msg)
 
@@ -780,13 +803,14 @@ def v4_graph():
             abort(404)
 
         # Find the neighboring runs, by order.
-        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N = 1))
+        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N=1))
         if prev_runs:
             start_rev = prev_runs[0].order.llvm_project_revision
             end_rev = highlight_run.order.llvm_project_revision
             revision_range = {
                 "start": convert_revision(start_rev),
-                "end": convert_revision(end_rev) }
+                "end": convert_revision(end_rev),
+            }
 
     # Build the graph data.
     legend = []
@@ -795,11 +819,12 @@ def v4_graph():
     overview_plots = []
     baseline_plots = []
     num_plots = len(graph_parameters)
-    for i,(machine,test,field, field_index) in enumerate(graph_parameters):
+    for i, (machine, test, field, field_index) in enumerate(graph_parameters):
         # Determine the base plot color.
         col = list(util.makeDarkColor(float(i) / num_plots))
         url = "/".join([str(machine.id), str(test.id), str(field_index)])
-        legend.append(LegendItem(machine, test.name, field.name, tuple(col), url))
+        legend.append(LegendItem(machine, test.name, field.name, tuple(col),
+                                 url))
 
         # Load all the field values for this test on the same machine.
         #
@@ -807,33 +832,38 @@ def v4_graph():
         # we want to load. Actually, we should just make this a single query.
         #
         # FIXME: Don't hard code field name.
-        q = ts.query(field.column, ts.Order.llvm_project_revision, ts.Run.start_time, ts.Run.id).\
-            join(ts.Run).join(ts.Order).\
-            filter(ts.Run.machine_id == machine.id).\
-            filter(ts.Sample.test == test).\
-            filter(field.column != None)
+        q = ts.query(field.column, ts.Order.llvm_project_revision,
+                     ts.Run.start_time, ts.Run.id) \
+            .join(ts.Run).join(ts.Order) \
+            .filter(ts.Run.machine_id == machine.id) \
+            .filter(ts.Sample.test == test) \
+            .filter(field.column.isnot(None))
 
         # Unless all samples requested, filter out failing tests.
         if not show_failures:
             if field.status_field:
                 q = q.filter((field.status_field.column == PASS) |
-                             (field.status_field.column == None))
+                             (field.status_field.column.is_(None)))
 
         # Aggregate by revision.
-        data = util.multidict((rev, (val, date, run_id)) for val,rev,date,run_id in q).items()
+        data = util.multidict((rev, (val, date, run_id))
+                              for val, rev, date, run_id in q).items()
         data.sort(key=lambda sample: convert_revision(sample[0]))
 
         graph_datum.append((test.name, data, col, field, url))
 
         # Get baselines for this line
         num_baselines = len(baseline_parameters)
-        for baseline_id, (baseline, baseline_title) in enumerate(baseline_parameters):
-            q_baseline = ts.query(field.column, ts.Order.llvm_project_revision, ts.Run.start_time, ts.Machine.name).\
-                         join(ts.Run).join(ts.Order).join(ts.Machine).\
-                         filter(ts.Run.id == baseline.id).\
-                         filter(ts.Sample.test == test).\
-                         filter(field.column != None)
-            # In the event of many samples, use the mean of the samples as the baseline.
+        for baseline_id, (baseline, baseline_title) in \
+                enumerate(baseline_parameters):
+            q_baseline = ts.query(field.column, ts.Order.llvm_project_revision,
+                                  ts.Run.start_time, ts.Machine.name) \
+                         .join(ts.Run).join(ts.Order).join(ts.Machine) \
+                         .filter(ts.Run.id == baseline.id) \
+                         .filter(ts.Sample.test == test) \
+                         .filter(field.column.isnot(None))
+            # In the event of many samples, use the mean of the samples as the
+            # baseline.
             samples = []
             for sample in q_baseline:
                 samples.append(sample[0])
@@ -846,33 +876,40 @@ def v4_graph():
             color_offset = float(baseline_id) / num_baselines / 2
             my_color = (i + color_offset) / num_plots
             dark_col = list(util.makeDarkerColor(my_color))
-            str_dark_col =  util.toColorString(dark_col)
-            baseline_plots.append({'color': str_dark_col,
-                                   'lineWidth': 2,
-                                   'yaxis': {'from': mean, 'to': mean},
-                                   'name': q_baseline[0].llvm_project_revision})
-            baseline_name = "Baseline {} on {}".format(baseline_title,  q_baseline[0].name)
-            legend.append(LegendItem(BaselineLegendItem(baseline_name, baseline.id), test.name, field.name, dark_col, None))
+            str_dark_col = util.toColorString(dark_col)
+            baseline_plots.append({
+                'color': str_dark_col,
+                'lineWidth': 2,
+                'yaxis': {'from': mean, 'to': mean},
+                'name': q_baseline[0].llvm_project_revision,
+            })
+            baseline_name = ("Baseline {} on {}"
+                             .format(baseline_title, q_baseline[0].name))
+            legend.append(LegendItem(BaselineLegendItem(
+                baseline_name, baseline.id), test.name, field.name, dark_col,
+                None))
 
     # Draw mean trend if requested.
     if mean_parameter:
         machine, field = mean_parameter
         test_name = 'Geometric Mean'
 
-        col = (0,0,0)
+        col = (0, 0, 0)
         legend.append(LegendItem(machine, test_name, field.name, col, None))
 
         q = ts.query(sqlalchemy.sql.func.min(field.column),
-                ts.Order.llvm_project_revision,
-                sqlalchemy.sql.func.min(ts.Run.start_time)).\
-            join(ts.Run).join(ts.Order).join(ts.Test).\
-            filter(ts.Run.machine_id == machine.id).\
-            filter(field.column != None).\
-            group_by(ts.Order.llvm_project_revision, ts.Test)
+                     ts.Order.llvm_project_revision,
+                     sqlalchemy.sql.func.min(ts.Run.start_time)) \
+              .join(ts.Run).join(ts.Order).join(ts.Test) \
+              .filter(ts.Run.machine_id == machine.id) \
+              .filter(field.column.isnot(None)) \
+              .group_by(ts.Order.llvm_project_revision, ts.Test)
 
         # Calculate geomean of each revision.
-        data = util.multidict(((rev, date), val) for val,rev,date in q).items()
-        data = [(rev, [(lnt.server.reporting.analysis.calc_geomean(vals), date)])
+        data = util.multidict(((rev, date), val) for val, rev, date in q) \
+            .items()
+        data = [(rev,
+                 [(lnt.server.reporting.analysis.calc_geomean(vals), date)])
                 for ((rev, date), vals) in data]
 
         # Sort data points according to revision number.
@@ -890,7 +927,7 @@ def v4_graph():
 
         if normalize_by_median:
             normalize_by = 1.0/stats.median([min([d[0] for d in values])
-                                           for _,values in data])
+                                            for _, values in data])
         else:
             normalize_by = 1.0
 
@@ -901,7 +938,8 @@ def v4_graph():
             # And the date on which they were taken.
             dates = [data_date[1] for data_date in datapoints]
             # Run where this point was collected.
-            runs = [data_pts[2] for data_pts in datapoints if len(data_pts)==3]
+            runs = [data_pts[2]
+                    for data_pts in datapoints if len(data_pts) == 3]
 
             # When we can, map x-axis to revisions, but when that is too hard
             # use the position of the sample instead.
@@ -938,7 +976,7 @@ def v4_graph():
             # Add the individual points, if requested.
             # For each point add a text label for the mouse over.
             if not hide_all_points:
-                for i,v in enumerate(values):
+                for i, v in enumerate(values):
                     point_metadata = dict(metadata)
                     point_metadata["date"] = str(dates[i])
                     points_data.append((x, v, point_metadata))
@@ -955,15 +993,19 @@ def v4_graph():
                 mad = stats.median_absolute_deviation(values, med)
                 errorbar_data.append((x, med, mad))
 
-        # Compute the moving average and or moving median of our data if requested.
+        # Compute the moving average and or moving median of our data if
+        # requested.
         if moving_average or moving_median:
             fun = None
 
             def compute_moving_average(x, window, average_list, median_list):
                 average_list.append((x, lnt.util.stats.mean(window)))
+
             def compute_moving_median(x, window, average_list, median_list):
                 median_list.append((x, lnt.util.stats.median(window)))
-            def compute_moving_average_and_median(x, window, average_list, median_list):
+
+            def compute_moving_average_and_median(x, window, average_list,
+                                                  median_list):
                 average_list.append((x, lnt.util.stats.mean(window)))
                 median_list.append((x, lnt.util.stats.median(window)))
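
The three callbacks above feed a sliding-window loop that continues in the next hunk. A compact sketch of that pass, assuming a symmetric window of window_size points to each side (window_size >= 1) and plain (x, y) pairs; lnt.util.stats provides the real mean/median:

    def moving_series(pts, window_size):
        # pts: list of (x, y); returns ([(x, mean)], [(x, median)]).
        averages, medians = [], []
        for i in range(len(pts)):
            start = max(0, i - window_size)
            end = min(len(pts), i + window_size)
            window = sorted(y for _, y in pts[start:end])
            averages.append((pts[i][0], sum(window) / float(len(window))))
            mid = len(window) // 2
            med = (window[mid] if len(window) % 2
                   else (window[mid - 1] + window[mid]) / 2.0)
            medians.append((pts[i][0], med))
        return averages, medians
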
 
@@ -980,25 +1022,28 @@ def v4_graph():
                 end_index = min(len_pts, i + moving_window_size)
 
                 window_pts = [x[1] for x in pts[start_index:end_index]]
-                fun(pts[i][0], window_pts, moving_average_data, moving_median_data)
+                fun(pts[i][0], window_pts, moving_average_data,
+                    moving_median_data)
 
         # On the overview, we always show the line plot.
         overview_plots.append({
-                "data" : pts,
-                "color" : util.toColorString(col) })
+            "data": pts,
+            "color": util.toColorString(col),
+        })
 
         # Add the minimum line plot, if requested.
         if show_lineplot:
-            plot = {"data" : pts,
-                    "color" : util.toColorString(col)
-                    }
+            plot = {
+                "data": pts,
+                "color": util.toColorString(col),
+            }
             if url:
                 plot["url"] = url
             graph_plots.append(plot)
         # Add regression line, if requested.
         if show_linear_regression:
-            xs = [t for t,v,_ in pts]
-            ys = [v for t,v,_ in pts]
+            xs = [t for t, v, _ in pts]
+            ys = [v for t, v, _ in pts]
 
             # We compute the regression line in terms of a normalized X scale.
             x_min, x_max = min(xs), max(xs)
@@ -1016,30 +1061,33 @@ def v4_graph():
                 info = None
 
             if info is not None:
-                slope, intercept,_,_,_ = info
+                slope, intercept, _, _, _ = info
 
                 reglin_col = [c * .7 for c in col]
                 reglin_pts = [(x_min, 0.0 * slope + intercept),
                               (x_max, 1.0 * slope + intercept)]
                 graph_plots.insert(0, {
-                        "data" : reglin_pts,
-                        "color" : util.toColorString(reglin_col),
-                        "lines" : {
-                            "lineWidth" : 2 },
-                        "shadowSize" : 4 })
+                    "data": reglin_pts,
+                    "color": util.toColorString(reglin_col),
+                    "lines": {
+                        "lineWidth": 2
+                    },
+                    "shadowSize": 4,
+                })
 
         # Add the points plot, if used.
         if points_data:
-            pts_col = (0,0,0)
-            plot = {"data" : points_data,
-                    "color" : util.toColorString(pts_col),
-                    "lines" : {"show" : False },
-                    "points" : {
-                        "show" : True,
-                        "radius" : .25,
-                        "fill" : True
-                        }
-                    }
+            pts_col = (0, 0, 0)
+            plot = {
+                "data": points_data,
+                "color": util.toColorString(pts_col),
+                "lines": {"show": False},
+                "points": {
+                    "show": True,
+                    "radius": .25,
+                    "fill": True,
+                },
+            }
             if url:
                 plot['url'] = url
             graph_plots.append(plot)
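
The hunk above fits a regression line over x values normalized to [0, 1] and unpacks a 5-tuple, the shape returned by e.g. scipy.stats.linregress. A dependency-free least-squares sketch with illustrative numbers:

    def least_squares(xs, ys):
        # Ordinary least squares; returns (slope, intercept).
        n = float(len(xs))
        mx, my = sum(xs) / n, sum(ys) / n
        var = sum((x - mx) ** 2 for x in xs)
        cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
        slope = cov / var
        return slope, my - slope * mx

    xs = [0.0, 0.5, 1.0]            # already normalized to [0, 1]
    ys = [10.0, 12.0, 14.0]
    slope, intercept = least_squares(xs, ys)
    reglin_pts = [(min(xs), 0.0 * slope + intercept),
                  (max(xs), 1.0 * slope + intercept)]
    # reglin_pts == [(0.0, 10.0), (1.0, 14.0)]
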
@@ -1048,30 +1096,35 @@ def v4_graph():
         if errorbar_data:
             bar_col = [c*.7 for c in col]
             graph_plots.append({
-                    "data" : errorbar_data,
-                    "lines" : { "show" : False },
-                    "color" : util.toColorString(bar_col),
-                    "points" : {
-                        "errorbars" : "y",
-                        "yerr" : { "show" : True,
-                                   "lowerCap" : "-",
-                                   "upperCap" : "-",
-                                   "lineWidth" : 1 } } })
+                "data": errorbar_data,
+                "lines": {"show": False},
+                "color": util.toColorString(bar_col),
+                "points": {
+                    "errorbars": "y",
+                    "yerr": {
+                        "show": True,
+                        "lowerCap": "-",
+                        "upperCap": "-",
+                        "lineWidth": 1,
+                    }
+                }
+            })
 
         # Add the moving average plot, if used.
         if moving_average_data:
             col = [0.32, 0.6, 0.0]
             graph_plots.append({
-                    "data" : moving_average_data,
-                    "color" : util.toColorString(col) })
-
+                "data": moving_average_data,
+                "color": util.toColorString(col),
+            })
 
         # Add the moving median plot, if used.
         if moving_median_data:
             col = [0.75, 0.0, 1.0]
             graph_plots.append({
-                    "data" : moving_median_data,
-                    "color" : util.toColorString(col) })
+                "data": moving_median_data,
+                "color": util.toColorString(col),
+            })
 
     if bool(request.args.get('json')):
         json_obj = dict()
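
The error-bar series appended a few hunks up holds (x, median, MAD) triples computed via lnt.util.stats. A standalone sketch of both statistics, assuming plain floats:

    def median(vals):
        s = sorted(vals)
        n, mid = len(s), len(s) // 2
        return s[mid] if n % 2 else (s[mid - 1] + s[mid]) / 2.0

    def median_absolute_deviation(vals, med=None):
        # Median of the absolute deviations from the median.
        if med is None:
            med = median(vals)
        return median([abs(v - med) for v in vals])

    values = [9.0, 10.0, 10.0, 11.0, 30.0]
    med = median(values)                          # 10.0
    mad = median_absolute_deviation(values, med)  # 1.0
    errorbar_point = (0, med, mad)
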
@@ -1080,11 +1133,13 @@ def v4_graph():
         simple_type_legend = []
         for li in legend:
             # Flatten name, make color a dict.
-            new_entry = {'name': li.machine.name,
-                         'test': li.test_name,
-                         'unit': li.field_name,
-                         'color': util.toColorString(li.color),
-                         'url': li.url}
+            new_entry = {
+                'name': li.machine.name,
+                'test': li.test_name,
+                'unit': li.field_name,
+                'color': util.toColorString(li.color),
+                'url': li.url,
+            }
             simple_type_legend.append(new_entry)
         json_obj['legend'] = simple_type_legend
         json_obj['revision_range'] = revision_range
@@ -1099,6 +1154,7 @@ def v4_graph():
                            overview_plots=overview_plots, legend=legend,
                            baseline_plots=baseline_plots)
 
+
 @v4_route("/global_status")
 def v4_global_status():
     from lnt.server.ui import util
@@ -1145,7 +1201,7 @@ def v4_global_status():
     # also convenient for our computations in the jinja page to have
     # access to
     def get_machine_keys(m):
-        m.css_name = m.name.replace('.','-')
+        m.css_name = m.name.replace('.', '-')
         return m
     recent_machines = map(get_machine_keys, recent_machines)
 
@@ -1199,7 +1255,7 @@ def v4_global_status():
         test_table.append(row)
 
     # Order the table by worst regression.
-    test_table.sort(key = lambda row: row[1], reverse=True)
+    test_table.sort(key=lambda row: row[1], reverse=True)
 
     return render_template("v4_global_status.html",
                            ts=ts,
@@ -1209,6 +1265,7 @@ def v4_global_status():
                            selected_field=field,
                            selected_revision=revision)
 
+
 @v4_route("/daily_report")
 def v4_daily_report_overview():
     # Redirect to the report for the most recent submitted run's date.
@@ -1235,6 +1292,7 @@ def v4_daily_report_overview():
                                year=date.year, month=date.month, day=date.day,
                                **extra_args))
 
+
 @v4_route("/daily_report/<int:year>/<int:month>/<int:day>")
 def v4_daily_report(year, month, day):
     num_days_str = request.args.get('num_days')
@@ -1270,10 +1328,12 @@ def v4_daily_report(year, month, day):
 ###
 # Cross Test-Suite V4 Views
 
+
 def get_summary_config_path():
     return os.path.join(current_app.old_config.tempDir,
                         'summary_report_config.json')
 
+
 @db_route("/summary_report/edit", only_v3=False, methods=('GET', 'POST'))
 def v4_summary_report_ui():
     # If this is a POST request, update the saved config.
@@ -1295,10 +1355,10 @@ def v4_summary_report_ui():
             config = flask.json.load(f)
     else:
         config = {
-            "machine_names" : [],
-            "orders" : [],
-            "machine_patterns" : [],
-            }
+            "machine_names": [],
+            "orders": [],
+            "machine_patterns": [],
+        }
 
     # Get the list of available test suites.
     testsuites = request.get_db().testsuite.values()
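
The surrounding code loads summary_report_config.json when it exists and otherwise falls back to the empty defaults shown above. A small sketch of that read-or-default pattern; path stands in for get_summary_config_path() and plain json replaces flask.json:

    import json
    import os

    def load_summary_config(path):
        # Return the saved config, or the empty defaults used above.
        if os.path.exists(path):
            with open(path) as f:
                return json.load(f)
        return {
            "machine_names": [],
            "orders": [],
            "machine_patterns": [],
        }
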
@@ -1323,6 +1383,7 @@ def v4_summary_report_ui():
                            config=config, all_machines=all_machines,
                            all_orders=all_orders)
 
+
 @db_route("/summary_report", only_v3=False)
 def v4_summary_report():
     # Load the summary report configuration.
@@ -1359,16 +1420,18 @@ You must define a summary report configu
 @frontend.route('/rules')
 def rules():
     discovered_rules = lnt.server.db.rules_manager.DESCRIPTIONS
-    return render_template("rules.html",rules=discovered_rules)
+    return render_template("rules.html", rules=discovered_rules)
+
 
 @frontend.route('/log')
 def log():
     async_ops.check_workers(True)
     return render_template("log.html")
 
+
 @frontend.route('/debug')
 def debug():
-    assert current_app.debug == False
+    assert not current_app.debug
 
 
 @frontend.route('/__health')
@@ -1392,6 +1455,7 @@ def health():
         return msg, 500
     return msg, 200
 
+
 @v4_route("/search")
 def v4_search():
     def _isint(i):
@@ -1430,10 +1494,12 @@ class MatrixDataRequest(object):
 
 
 # How much data to render in the Matrix view.
-MATRIX_LIMITS = [('12', 'Small'),
-                 ('50', 'Medium'),
-                 ('250', 'Large'),
-                 ('-1', 'All')]
+MATRIX_LIMITS = [
+    ('12', 'Small'),
+    ('50', 'Medium'),
+    ('250', 'Large'),
+    ('-1', 'All'),
+]
 
 
 class MatrixOptions(Form):
@@ -1511,8 +1577,8 @@ def v4_matrix():
 
     if not data_parameters:
         abort(404, "Request requires some data arguments.")
-    # Feature: if all of the results are from the same machine, hide the name to
-    # make the headers more compact.
+    # Feature: if all of the results are from the same machine, hide the name
+    # to make the headers more compact.
     dedup = True
     for r in data_parameters:
         if r.machine.id != data_parameters[0].machine.id:
@@ -1530,12 +1596,13 @@ def v4_matrix():
     all_orders = set()
     order_to_id = {}
     for req in data_parameters:
-        q = ts.query(req.field.column, ts.Order.llvm_project_revision, ts.Order.id) \
+        q = ts.query(req.field.column, ts.Order.llvm_project_revision,
+                     ts.Order.id) \
             .join(ts.Run) \
             .join(ts.Order) \
             .filter(ts.Run.machine_id == req.machine.id) \
             .filter(ts.Sample.test == req.test) \
-            .filter(req.field.column != None) \
+            .filter(req.field.column.isnot(None)) \
             .order_by(ts.Order.llvm_project_revision.desc())
 
         limit = request.args.get('limit', post_limit)
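
Beyond the reindenting, this and the neighboring hunks replace field.column != None with field.column.isnot(None). Both compile to an IS NOT NULL clause in SQLAlchemy; the explicit operator just avoids flake8's E711 warning. A self-contained sketch against a throwaway model (illustrative names, not LNT's schema):

    from sqlalchemy import Column, Float, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Sample(Base):
        # Stand-in table; LNT's real Sample model is far richer.
        __tablename__ = 'sample'
        id = Column(Integer, primary_key=True)
        value = Column(Float)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    q_old = session.query(Sample).filter(Sample.value != None)  # noqa: E711
    q_new = session.query(Sample).filter(Sample.value.isnot(None))
    print(str(q_old) == str(q_new))  # True: both emit "... IS NOT NULL"
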
@@ -1567,12 +1634,13 @@ def v4_matrix():
         baseline_name = backup_baseline
 
     for req in data_parameters:
-        q_baseline = ts.query(req.field.column, ts.Order.llvm_project_revision, ts.Order.id) \
+        q_baseline = ts.query(req.field.column, ts.Order.llvm_project_revision,
+                              ts.Order.id) \
                        .join(ts.Run) \
                        .join(ts.Order) \
                        .filter(ts.Run.machine_id == req.machine.id) \
                        .filter(ts.Sample.test == req.test) \
-                       .filter(req.field.column != None) \
+                       .filter(req.field.column.isnot(None)) \
                        .filter(ts.Order.llvm_project_revision == baseline_rev)
         baseline_data = q_baseline.all()
         if baseline_data:



