[LNT] r307724 - lnt/server/reporting: Fix pep8 warnings

Matthias Braun via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 11 15:28:33 PDT 2017


Author: matze
Date: Tue Jul 11 15:28:33 2017
New Revision: 307724

URL: http://llvm.org/viewvc/llvm-project?rev=307724&view=rev
Log:
lnt/server/reporting: Fix pep8 warnings

Modified:
    lnt/trunk/lnt/server/reporting/analysis.py
    lnt/trunk/lnt/server/reporting/dailyreport.py
    lnt/trunk/lnt/server/reporting/runs.py
    lnt/trunk/lnt/server/reporting/summaryreport.py
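
As context for the change, here is a minimal sketch of how these files
could be checked for pep8 warnings. It assumes the 'pycodestyle' package
(formerly published as 'pep8') is installed; the file paths simply mirror
the list above:

    # Report pep8/pycodestyle violations for the touched files.
    import pycodestyle

    style = pycodestyle.StyleGuide(max_line_length=79)
    report = style.check_files([
        'lnt/server/reporting/analysis.py',
        'lnt/server/reporting/dailyreport.py',
        'lnt/server/reporting/runs.py',
        'lnt/server/reporting/summaryreport.py',
    ])
    print("pep8 violations found:", report.total_errors)

Typical warnings silenced by this revision, as the hunks below show,
include missing whitespace after ',' (E231), lines longer than 79
characters (E501), missing blank lines before top-level definitions
(E302), and trailing whitespace.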

Modified: lnt/trunk/lnt/server/reporting/analysis.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/analysis.py?rev=307724&r1=307723&r2=307724&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/analysis.py (original)
+++ lnt/trunk/lnt/server/reporting/analysis.py Tue Jul 11 15:28:33 2017
@@ -59,8 +59,8 @@ class ComparisonResult:
                  confidence_lv=0.05, bigger_is_better=False):
         self.aggregation_fn = aggregation_fn
 
-        # Special case: if we're using the minimum to aggregate, swap it for max
-        # if bigger_is_better.
+        # Special case: if we're using the minimum to aggregate, swap it for
+        # max if bigger_is_better.
         if aggregation_fn == stats.safe_min and bigger_is_better:
             aggregation_fn = stats.safe_max
 
@@ -130,7 +130,7 @@ class ComparisonResult:
                           self.prev_samples,
                           self.confidence_lv,
                           bool(self.bigger_is_better))
-                          
+
     def __json__(self):
         simple_dict = self.__dict__
         simple_dict['aggregation_fn'] = self.aggregation_fn.__name__
@@ -180,15 +180,15 @@ class ComparisonResult:
         #
         # FIXME: One bug here is that we risk losing performance data on tests
         # which flop to failure then back. What would be nice to do here is to
-        # find the last value in a passing run, or to move to using proper keyed
-        # reference runs.
+        # find the last value in a passing run, or to move to using proper
+        # keyed reference runs.
         if self.failed:
             return UNCHANGED_FAIL
         elif self.prev_failed:
-            return UNCHANGED_PASS 
+            return UNCHANGED_PASS
 
-        # Always ignore percentage changes below 1%, for now, we just don't have
-        # enough time to investigate that level of stuff.
+        # Always ignore percentage changes below 1%, for now, we just don't
+        # have enough time to investigate that level of stuff.
         if ignore_small and abs(self.pct_delta) < .01:
             return UNCHANGED_PASS
 
@@ -199,8 +199,8 @@ class ComparisonResult:
         if ignore_small and abs(self.delta) < .01:
             return UNCHANGED_PASS
 
-        # Ignore tests whose delta is too small relative to the precision we can
-        # sample at; otherwise quantization means that we can't measure the
+        # Ignore tests whose delta is too small relative to the precision we
+        # can sample at; otherwise quantization means that we can't measure the
         # standard deviation with enough accuracy.
         if abs(self.delta) <= 2 * value_precision * confidence_interval:
             return UNCHANGED_PASS
@@ -268,12 +268,14 @@ class RunInfo(object):
         This query is expensive.
         """
         runs = [run]
-        runs_prev = self.testsuite.get_previous_runs_on_machine(run, num_comparison_runs)
+        runs_prev = self.testsuite \
+            .get_previous_runs_on_machine(run, num_comparison_runs)
         runs += runs_prev
 
         if compare_run is not None:
             compare_runs = [compare_run]
-            comp_prev = self.testsuite.get_previous_runs_on_machine(compare_run, num_comparison_runs)
+            comp_prev = self.testsuite \
+                .get_previous_runs_on_machine(compare_run, num_comparison_runs)
             compare_runs += comp_prev
         else:
             compare_runs = []
@@ -311,10 +313,11 @@ class RunInfo(object):
         if runs:
             cur_profile = self.profile_map.get((runs[0].id, test_id), None)
         if compare_runs:
-            prev_profile = self.profile_map.get((compare_runs[0].id, test_id), None)
-        
-        # Determine whether this (test,pset) passed or failed in the current and
-        # previous runs.
+            prev_profile = self.profile_map.get((compare_runs[0].id, test_id),
+                                                None)
+
+        # Determine whether this (test,pset) passed or failed in the current
+        # and previous runs.
         #
         # FIXME: Support XFAILs and non-determinism (mixed fail and pass)
         # better.
@@ -341,9 +344,9 @@ class RunInfo(object):
             # Warn in the log when the hash wasn't the same for all samples.
             cur_hash_set = set(hash_values)
             if len(cur_hash_set) > 1:
-                logger.warning(("Found different hashes for multiple samples " +
-                                "in the same run {0}: {1}\nTestID:{2}").format(
-                               runs, hash_values, test_id))
+                logger.warning("Found different hashes for multiple samples "
+                               "in the same run {0}: {1}\nTestID:{2}"
+                               .format(runs, hash_values, test_id))
 
             cur_hash = hash_values[0] if len(hash_values) > 0 else None
             prev_hash = prev_hash_values[0] \

Modified: lnt/trunk/lnt/server/reporting/dailyreport.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/dailyreport.py?rev=307724&r1=307723&r2=307724&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/dailyreport.py (original)
+++ lnt/trunk/lnt/server/reporting/dailyreport.py Tue Jul 11 15:28:33 2017
@@ -372,7 +372,7 @@ class DailyReport(object):
                 nr_tests_seen = 0
                 for test in self.reporting_tests:
                     samples = sri.get_samples(day_runs, test.id)
-                    if len(samples)>0:
+                    if len(samples) > 0:
                         nr_tests_seen += 1
                 nr_tests_for_machine.append(nr_tests_seen)
             self.nr_tests_table.append((machine, nr_tests_for_machine))

Modified: lnt/trunk/lnt/server/reporting/runs.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/runs.py?rev=307724&r1=307723&r2=307724&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/runs.py (original)
+++ lnt/trunk/lnt/server/reporting/runs.py Tue Jul 11 15:28:33 2017
@@ -9,7 +9,6 @@ import lnt.util.stats
 from lnt.testing.util.commands import visible_note
 
 
-
 def generate_run_data(run, baseurl, num_comparison_runs=0, result=None,
                       compare_to=None, baseline=None,
                       aggregation_fn=lnt.util.stats.safe_min,
@@ -32,10 +31,11 @@ def generate_run_data(run, baseurl, num_
         # the default baseline revision for which this machine also
         # reported.
         baseline = machine.get_baseline_run()
-    
+
     # If the baseline is the same as the comparison run, ignore it.
     if baseline is compare_to:
-        visible_note("Baseline and compare_to are the same: disabling baseline.")
+        visible_note("Baseline and compare_to are the same: "
+                     "disabling baseline.")
         baseline = None
 
     # Gather the runs to use for statistical data.
@@ -92,10 +92,10 @@ def generate_run_data(run, baseurl, num_
     # Collect the simplified results, if desired, for sending back to clients.
     if result is not None:
         pset_results = []
-        result['test_results'] = [{ 'pset' : (), 'results' : pset_results}]
-        for field,field_results in test_results:
-            for _,bucket,_ in field_results:
-                for name,cr,_ in bucket:
+        result['test_results'] = [{'pset': (), 'results': pset_results}]
+        for field, field_results in test_results:
+            for _, bucket, _ in field_results:
+                for name, cr, _ in bucket:
                     # FIXME: Include additional information about performance
                     # changes.
                     pset_results.append(("%s.%s" % (name, field.name),
@@ -106,40 +106,43 @@ def generate_run_data(run, baseurl, num_
     # display
     def aggregate_counts_across_all_bucket_types(i, name):
         num_items = sum(len(field_results[i][1])
-                        for _,field_results in test_results)
+                        for _, field_results in test_results)
         if baseline:
             num_items_vs_baseline = sum(
                 len(field_results[i][1])
-                for _,field_results in baselined_results)
+                for _, field_results in baselined_results)
         else:
             num_items_vs_baseline = None
 
         return i, name, num_items, num_items_vs_baseline
 
-    num_item_buckets = [aggregate_counts_across_all_bucket_types(x[0], x[1][0])\
-                            for x in enumerate(test_results[0][1])]
+    num_item_buckets = [aggregate_counts_across_all_bucket_types(x[0], x[1][0])
+                        for x in enumerate(test_results[0][1])]
 
     def maybe_sort_bucket(bucket, bucket_name, show_perf):
         if not bucket or bucket_name == 'Unchanged Test' or not show_perf:
             return bucket
         else:
-            return sorted(bucket, key=lambda (_,cr,__): -abs(cr.pct_delta))
+            return sorted(bucket, key=lambda (_, cr, __): -abs(cr.pct_delta))
+
     def prioritize_buckets(test_results):
-        prioritized = [(priority, field, bucket_name, maybe_sort_bucket(bucket, bucket_name, show_perf),
+        prioritized = [(priority, field, bucket_name,
+                        maybe_sort_bucket(bucket, bucket_name, show_perf),
                         [name for name, _, __ in bucket], show_perf)
-                       for field,field_results in test_results
-                       for priority,(bucket_name, bucket,
-                                     show_perf) in enumerate(field_results)]
-        prioritized.sort(key = lambda item: (item[0], item[1].name))
+                       for field, field_results in test_results
+                       for priority, (bucket_name, bucket,
+                                      show_perf) in enumerate(field_results)]
+        prioritized.sort(key=lambda item: (item[0], item[1].name))
         return prioritized
 
     # Generate prioritized buckets for run over run and run over baseline data.
     prioritized_buckets_run_over_run = prioritize_buckets(test_results)
     if baseline:
-        prioritized_buckets_run_over_baseline = prioritize_buckets(baselined_results)
+        prioritized_buckets_run_over_baseline = \
+            prioritize_buckets(baselined_results)
     else:
         prioritized_buckets_run_over_baseline = None
-    
+
     # Prepare auxillary variables for rendering.
     # Create Subject
     subject = """%s test results""" % (machine.name,)
@@ -155,8 +158,8 @@ def generate_run_data(run, baseurl, num_
         url_fields.append(('compare_to', str(compare_to.id)))
     if baseline:
         url_fields.append(('baseline', str(baseline.id)))
-    report_url = "%s?%s" % (run_url, "&".join("%s=%s" % (k,v)
-                                              for k,v in url_fields))
+    report_url = "%s?%s" % (run_url, "&".join("%s=%s" % (k, v)
+                                                  for k, v in url_fields))
 
     # Compute static CSS styles for elemenets. We use the style directly on
     # elements instead of via a stylesheet to support major email clients (like
@@ -164,18 +167,17 @@ def generate_run_data(run, baseurl, num_
     #
     # These are derived from the static style.css file we use elsewhere.
     #
-    # These are just defaults however, and the caller can override them with the
-    # 'styles' and 'classes' kwargs.
+    # These are just defaults however, and the caller can override them with
+    # the 'styles' and 'classes' kwargs.
     styles_ = {
-        "body" : ("color:#000000; background-color:#ffffff; "
-                  "font-family: Helvetica, sans-serif; font-size:9pt"),
-        "h1" : ("font-size: 14pt"),
-        "table" : "font-size:9pt; border-spacing: 0px; border: 1px solid black",
-        "th" : (
-            "background-color:#eee; color:#666666; font-weight: bold; "
-            "cursor: default; text-align:center; font-weight: bold; "
-            "font-family: Verdana; padding:5px; padding-left:8px"),
-        "td" : "padding:5px; padding-left:8px",
+        "body": ("color:#000000; background-color:#ffffff; "
+                 "font-family: Helvetica, sans-serif; font-size:9pt"),
+        "h1": ("font-size: 14pt"),
+        "table": "font-size:9pt; border-spacing: 0px; border: 1px solid black",
+        "th": ("background-color:#eee; color:#666666; font-weight: bold; "
+               "cursor: default; text-align:center; font-weight: bold; "
+               "font-family: Verdana; padding:5px; padding-left:8px"),
+        "td": "padding:5px; padding-left:8px",
     }
     classes_ = {
     }
@@ -199,7 +201,8 @@ def generate_run_data(run, baseurl, num_
         'run_to_run_info': run_to_run_info,
         'prioritized_buckets_run_over_run': prioritized_buckets_run_over_run,
         'run_to_baseline_info': run_to_baseline_info,
-        'prioritized_buckets_run_over_baseline': prioritized_buckets_run_over_baseline,
+        'prioritized_buckets_run_over_baseline':
+            prioritized_buckets_run_over_baseline,
         'styles': styles_,
         'classes': classes_,
         'start_time': start_time,

Modified: lnt/trunk/lnt/server/reporting/summaryreport.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/summaryreport.py?rev=307724&r1=307723&r2=307724&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/summaryreport.py (original)
+++ lnt/trunk/lnt/server/reporting/summaryreport.py Tue Jul 11 15:28:33 2017
@@ -6,6 +6,7 @@ import lnt.util.stats
 ###
 # Aggregation Function
 
+
 class Aggregation(object):
     def __init__(self):
         self.is_initialized = False
@@ -22,6 +23,7 @@ class Aggregation(object):
             self._initialize(values)
         self._append(values)
 
+
 class Sum(Aggregation):
     def __init__(self):
         Aggregation.__init__(self)
@@ -34,9 +36,10 @@ class Sum(Aggregation):
         self.sum = [0.] * len(values)
 
     def _append(self, values):
-        for i,value in enumerate(values):
+        for i, value in enumerate(values):
             self.sum[i] += value
 
+
 class Mean(Aggregation):
     def __init__(self):
         Aggregation.__init__(self)
@@ -50,10 +53,11 @@ class Mean(Aggregation):
         self.sum = [0.] * len(values)
 
     def _append(self, values):
-        for i,value in enumerate(values):
+        for i, value in enumerate(values):
             self.sum[i] += value
         self.count += 1
 
+
 class GeometricMean(Aggregation):
     def __init__(self):
         Aggregation.__init__(self)
@@ -70,10 +74,11 @@ class GeometricMean(Aggregation):
         self.product = [1.] * len(values)
 
     def _append(self, values):
-        for i,value in enumerate(values):
+        for i, value in enumerate(values):
             self.product[i] *= value
         self.count += 1
 
+
 class NormalizedMean(Mean):
     def _append(self, values):
         baseline = values[0]
@@ -82,18 +87,20 @@ class NormalizedMean(Mean):
 
 ###
 
+
 class SummaryReport(object):
     def __init__(self, db, report_orders, report_machine_names,
                  report_machine_patterns):
         self.db = db
         self.testsuites = list(db.testsuite.values())
-        self.report_orders = list((name,orders)
-                                  for name,orders in report_orders)
+        self.report_orders = list((name, orders)
+                                  for name, orders in report_orders)
         self.report_machine_names = set(report_machine_names)
         self.report_machine_patterns = list(report_machine_patterns)
         self.report_machine_rexes = [
             re.compile(pattern)
-            for pattern in self.report_machine_patterns]
+            for pattern in self.report_machine_patterns
+        ]
 
         self.data_table = None
         self.requested_machine_ids = None
@@ -116,12 +123,13 @@ class SummaryReport(object):
                                        for ts in self.testsuites)
         self.requested_machine_ids = dict(
             (ts, [m.id for m in machines])
-            for ts,machines in self.requested_machines.items())
+            for ts, machines in self.requested_machines.items()
+        )
 
         # First, collect all the runs to summarize on, for each index in the
         # report orders.
         self.runs_at_index = []
-        for _,orders in self.report_orders:
+        for _, orders in self.report_orders:
             # For each test suite...
             runs = []
             for ts in self.testsuites:
@@ -144,7 +152,7 @@ class SummaryReport(object):
                     self.warnings.append(
                         'no runs for test suite %r in orders %r' % (
                             ts.name, orders))
-                        
+
                 runs.append((ts_runs, ts_order_ids))
             self.runs_at_index.append(runs)
 
@@ -156,11 +164,12 @@ class SummaryReport(object):
         # Compute the base table for aggregation.
         #
         # The table is indexed by a test name and test features, which are
-        # either extracted from the test name or from the test run (depending on
-        # the suite).
+        # either extracted from the test name or from the test run (depending
+        # on the suite).
         #
         # Each value in the table contains a array with one item for each
-        # report_order entry, which contains all of the samples for that entry..
+        # report_order entry, which contains all of the samples for that
+        # entry..
         #
         # The table keys are tuples of:
         #  (<test name>,
@@ -236,7 +245,7 @@ class SummaryReport(object):
             test = ts_tests[sample[1]]
 
             # Extract the compile flags from the test name.
-            base_name,flags = test.name.split('(')
+            base_name, flags = test.name.split('(')
             assert flags[-1] == ')'
             other_flags = []
             build_mode = None
@@ -333,7 +342,7 @@ class SummaryReport(object):
                 for sample in samples:
                     run = run_id_map[sample[0]]
                     datapoints = list()
-                    for key,value in get_datapoints_for_sample(ts, sample):
+                    for key, value in get_datapoints_for_sample(ts, sample):
                         items = self.data_table.get(key)
                         if items is None:
                             items = [[]
@@ -349,7 +358,7 @@ class SummaryReport(object):
                     return True
 
         def compute_index_name(key):
-            test_name,metric,arch,build_mode,machine_id = key
+            test_name, metric, arch, build_mode, machine_id = key
 
             # If this is a nightly test..
             if test_name.startswith('SingleSource/') or \
@@ -377,7 +386,7 @@ class SummaryReport(object):
 
             # Index full builds across all job sizes.
             if test_name.startswith('build/'):
-                project_name,subtest_name = re.match(
+                project_name, subtest_name = re.match(
                     r'build/(.*)\(j=[0-9]+\)\.(.*)', str(test_name)).groups()
                 return (('Full Build (%s)' % (project_name,),
                          metric, build_mode, arch, machine_id),
@@ -385,7 +394,7 @@ class SummaryReport(object):
 
             # Index single file tests across all inputs.
             if test_name.startswith('compile/'):
-                file_name,stage_name,subtest_name = re.match(
+                file_name, stage_name, subtest_name = re.match(
                     r'compile/(.*)/(.*)/\(\)\.(.*)', str(test_name)).groups()
                 return (('Single File (%s)' % (stage_name,),
                          metric, build_mode, arch, machine_id),
@@ -393,7 +402,7 @@ class SummaryReport(object):
 
             # Index PCH generation tests by input.
             if test_name.startswith('pch-gen/'):
-                file_name,subtest_name = re.match(
+                file_name, subtest_name = re.match(
                     r'pch-gen/(.*)/\(\)\.(.*)', str(test_name)).groups()
                 return (('PCH Generation (%s)' % (file_name,),
                          metric, build_mode, arch, machine_id),
@@ -408,7 +417,7 @@ class SummaryReport(object):
                     return True
 
         self.indexed_data_table = {}
-        for key,values in self.data_table.items():
+        for key, values in self.data_table.items():
             # Ignore any test which is missing some data.
             if is_missing_samples(values):
                 self.warnings.append("missing values for %r" % (key,))
@@ -423,15 +432,15 @@ class SummaryReport(object):
             if result is None:
                 continue
 
-            index_name,index_class = result
+            index_name, index_class = result
             item = self.indexed_data_table.get(index_name)
             if item is None:
                 self.indexed_data_table[index_name] = item = index_class()
             item.append(medians)
-            
+
     def _build_normalized_data_table(self):
         self.normalized_data_table = {}
-        for key,indexed_value in self.indexed_data_table.items():
+        for key, indexed_value in self.indexed_data_table.items():
             test_name, metric, build_mode, arch, machine_id = key
             if test_name.startswith('Single File'):
                 aggr = Mean
@@ -446,10 +455,11 @@ class SummaryReport(object):
 
     single_file_stage_order = [
         'init', 'driver', 'syntax', 'irgen_only', 'codegen', 'assembly']
+
     def _build_final_data_tables(self):
         self.grouped_table = {}
         self.single_file_table = {}
-        for key,normalized_value in self.normalized_data_table.items():
+        for key, normalized_value in self.normalized_data_table.items():
             test_name, metric, build_mode, arch = key
 
             # If this isn't a single file test, add a plot for it grouped by
@@ -469,7 +479,7 @@ class SummaryReport(object):
                 stack_index = self.single_file_stage_order.index(stage_name)
             except ValueError:
                 stack_index = None
-            
+
             # If we don't have an index for this stage, ignore it.
             if stack_index is None:
                 continue



