[llvm-commits] [LNT] r161337 - in /lnt/trunk/lnt: db/__init__.py db/runinfo.py server/reporting/analysis.py server/reporting/runs.py server/ui/templates/v4_run.html server/ui/views.py util/ImportData.py

Daniel Dunbar daniel at zuster.org
Mon Aug 6 13:02:55 PDT 2012


Author: ddunbar
Date: Mon Aug  6 15:02:55 2012
New Revision: 161337

URL: http://llvm.org/viewvc/llvm-project?rev=161337&view=rev
Log:
Integrate lnt.db.runinfo into lnt.server.reporting.analysis.

Removed:
    lnt/trunk/lnt/db/__init__.py
    lnt/trunk/lnt/db/runinfo.py
Modified:
    lnt/trunk/lnt/server/reporting/analysis.py
    lnt/trunk/lnt/server/reporting/runs.py
    lnt/trunk/lnt/server/ui/templates/v4_run.html
    lnt/trunk/lnt/server/ui/views.py
    lnt/trunk/lnt/util/ImportData.py

Removed: lnt/trunk/lnt/db/__init__.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/db/__init__.py?rev=161336&view=auto
==============================================================================
--- lnt/trunk/lnt/db/__init__.py (original)
+++ lnt/trunk/lnt/db/__init__.py (removed)
@@ -1 +0,0 @@
-__all__ = []

Removed: lnt/trunk/lnt/db/runinfo.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/db/runinfo.py?rev=161336&view=auto
==============================================================================
--- lnt/trunk/lnt/db/runinfo.py (original)
+++ lnt/trunk/lnt/db/runinfo.py (removed)
@@ -1,103 +0,0 @@
-REGRESSED = 'REGRESSED'
-IMPROVED = 'IMPROVED'
-UNCHANGED_PASS = 'UNCHANGED_PASS'
-UNCHANGED_FAIL = 'UNCHANGED_FAIL'
-
-class ComparisonResult:
-    def __init__(self, cur_value, prev_value, delta, pct_delta, stddev, MAD,
-                 cur_failed, prev_failed, samples, stddev_mean = None,
-                 stddev_is_estimated = False):
-        self.current = cur_value
-        self.previous = prev_value
-        self.delta = delta
-        self.pct_delta = pct_delta
-        self.stddev = stddev
-        self.MAD = MAD
-        self.failed = cur_failed
-        self.prev_failed = prev_failed
-        self.samples = samples
-        self.stddev_mean = stddev_mean
-        self.stddev_is_estimated = stddev_is_estimated
-
-    def get_samples(self):
-        return self.samples
-
-    def get_test_status(self):
-        # Compute the comparison status for the test success.
-        if self.failed:
-            if self.prev_failed:
-                return UNCHANGED_FAIL
-            else:
-                return REGRESSED
-        else:
-            if self.prev_failed:
-                return IMPROVED
-            else:
-                return UNCHANGED_PASS
-
-    def get_value_status(self, confidence_interval=2.576,
-                         value_precision=0.0001, ignore_small=True):
-        if self.current is None or self.previous is None:
-            return None
-
-        # Don't report value errors for tests which fail, or which just started
-        # passing.
-        #
-        # FIXME: One bug here is that we risk losing performance data on tests
-        # which flop to failure then back. What would be nice to do here is to
-        # find the last value in a passing run, or to move to using proper keyed
-        # reference runs.
-        if self.failed:
-            return UNCHANGED_FAIL
-        elif self.prev_failed:
-            return UNCHANGED_PASS
-
-        # Ignore tests whose delta is too small relative to the precision we can
-        # sample at; otherwise quantization means that we can't measure the
-        # standard deviation with enough accuracy.
-        if abs(self.delta) <= 2 * value_precision * confidence_interval:
-            return UNCHANGED_PASS
-
-        # Always ignore percentage changes below 1%, for now, we just don't have
-        # enough time to investigate that level of stuff.
-        if ignore_small and abs(self.pct_delta) < .01:
-            return UNCHANGED_PASS
-
-        # Always ignore changes with small deltas. There is no mathematical
-        # basis for this, it should be obviated by appropriate statistical
-        # checks, but practical evidence indicates what we currently have isn't
-        # good enough (for reasons I do not yet understand).
-        if ignore_small and abs(self.delta) < .01:
-            return UNCHANGED_PASS
-
-        # If we have a comparison window, then measure using a symmetric
-        # confidence interval.
-        if self.stddev is not None:
-            is_significant = abs(self.delta) > (self.stddev *
-                                                confidence_interval)
-
-            # If the stddev is estimated, then it is also only significant if
-            # the delta from the estimate mean is above the confidence interval.
-            if self.stddev_is_estimated:
-                is_significant &= (abs(self.current - self.stddev_mean) >
-                                   self.stddev * confidence_interval)
-
-            # If the delta is significant, return the direction of the change.
-            if is_significant:
-                if self.delta < 0:
-                    return IMPROVED
-                else:
-                    return REGRESSED
-            else:
-                return UNCHANGED_PASS
-
-        # Otherwise, report any changes above 0.2%, which is a rough
-        # approximation for the smallest change we expect "could" be measured
-        # accurately.
-        if abs(self.pct_delta) >= .002:
-            if self.pct_delta < 0:
-                return IMPROVED
-            else:
-                return REGRESSED
-        else:
-            return UNCHANGED_PASS

Modified: lnt/trunk/lnt/server/reporting/analysis.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/analysis.py?rev=161337&r1=161336&r2=161337&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/analysis.py (original)
+++ lnt/trunk/lnt/server/reporting/analysis.py Mon Aug  6 15:02:55 2012
@@ -4,9 +4,112 @@
 
 from lnt.util import stats
 from lnt.server.ui import util
-from lnt.db.runinfo import ComparisonResult
 from lnt.testing import PASS, FAIL, XFAIL
 
+REGRESSED = 'REGRESSED'
+IMPROVED = 'IMPROVED'
+UNCHANGED_PASS = 'UNCHANGED_PASS'
+UNCHANGED_FAIL = 'UNCHANGED_FAIL'
+
+class ComparisonResult:
+    def __init__(self, cur_value, prev_value, delta, pct_delta, stddev, MAD,
+                 cur_failed, prev_failed, samples, stddev_mean = None,
+                 stddev_is_estimated = False):
+        self.current = cur_value
+        self.previous = prev_value
+        self.delta = delta
+        self.pct_delta = pct_delta
+        self.stddev = stddev
+        self.MAD = MAD
+        self.failed = cur_failed
+        self.prev_failed = prev_failed
+        self.samples = samples
+        self.stddev_mean = stddev_mean
+        self.stddev_is_estimated = stddev_is_estimated
+
+    def get_samples(self):
+        return self.samples
+
+    def get_test_status(self):
+        # Compute the comparison status for the test success.
+        if self.failed:
+            if self.prev_failed:
+                return UNCHANGED_FAIL
+            else:
+                return REGRESSED
+        else:
+            if self.prev_failed:
+                return IMPROVED
+            else:
+                return UNCHANGED_PASS
+
+    def get_value_status(self, confidence_interval=2.576,
+                         value_precision=0.0001, ignore_small=True):
+        if self.current is None or self.previous is None:
+            return None
+
+        # Don't report value errors for tests which fail, or which just started
+        # passing.
+        #
+        # FIXME: One bug here is that we risk losing performance data on tests
+        # which flop to failure then back. What would be nice to do here is to
+        # find the last value in a passing run, or to move to using proper keyed
+        # reference runs.
+        if self.failed:
+            return UNCHANGED_FAIL
+        elif self.prev_failed:
+            return UNCHANGED_PASS
+
+        # Ignore tests whose delta is too small relative to the precision we can
+        # sample at; otherwise quantization means that we can't measure the
+        # standard deviation with enough accuracy.
+        if abs(self.delta) <= 2 * value_precision * confidence_interval:
+            return UNCHANGED_PASS
+
+        # Always ignore percentage changes below 1%, for now, we just don't have
+        # enough time to investigate that level of stuff.
+        if ignore_small and abs(self.pct_delta) < .01:
+            return UNCHANGED_PASS
+
+        # Always ignore changes with small deltas. There is no mathematical
+        # basis for this, it should be obviated by appropriate statistical
+        # checks, but practical evidence indicates what we currently have isn't
+        # good enough (for reasons I do not yet understand).
+        if ignore_small and abs(self.delta) < .01:
+            return UNCHANGED_PASS
+
+        # If we have a comparison window, then measure using a symmetric
+        # confidence interval.
+        if self.stddev is not None:
+            is_significant = abs(self.delta) > (self.stddev *
+                                                confidence_interval)
+
+            # If the stddev is estimated, then it is also only significant if
+            # the delta from the estimate mean is above the confidence interval.
+            if self.stddev_is_estimated:
+                is_significant &= (abs(self.current - self.stddev_mean) >
+                                   self.stddev * confidence_interval)
+
+            # If the delta is significant, return the direction of the change.
+            if is_significant:
+                if self.delta < 0:
+                    return IMPROVED
+                else:
+                    return REGRESSED
+            else:
+                return UNCHANGED_PASS
+
+        # Otherwise, report any changes above 0.2%, which is a rough
+        # approximation for the smallest change we expect "could" be measured
+        # accurately.
+        if abs(self.pct_delta) >= .002:
+            if self.pct_delta < 0:
+                return IMPROVED
+            else:
+                return REGRESSED
+        else:
+            return UNCHANGED_PASS
+
 class RunInfo(object):
     def __init__(self, testsuite, runs_to_load,
                  aggregation_fn = min):

Modified: lnt/trunk/lnt/server/reporting/runs.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/runs.py?rev=161337&r1=161336&r2=161337&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/runs.py (original)
+++ lnt/trunk/lnt/server/reporting/runs.py Mon Aug  6 15:02:55 2012
@@ -10,7 +10,6 @@
 import lnt.server.reporting.analysis
 import lnt.server.ui.util
 import lnt.util.stats
-from lnt.db import runinfo
 
 def generate_run_report(run, baseurl, only_html_body = False,
                         num_comparison_runs = 10, result = None,
@@ -365,19 +364,19 @@
             comparison_results[(name,field)] = cr
             test_status = cr.get_test_status()
             perf_status = cr.get_value_status()
-            if test_status == runinfo.REGRESSED:
+            if test_status == lnt.server.reporting.analysis.REGRESSED:
                 bucket = new_failures
-            elif test_status == runinfo.IMPROVED:
+            elif test_status == lnt.server.reporting.analysis.IMPROVED:
                 bucket = new_passes
             elif cr.current is None and cr.previous is not None:
                 bucket = removed_tests
             elif cr.current is not None and cr.previous is None:
                 bucket = added_tests
-            elif test_status == runinfo.UNCHANGED_FAIL:
+            elif test_status == lnt.server.reporting.analysis.UNCHANGED_FAIL:
                 bucket = existing_failures
-            elif perf_status == runinfo.REGRESSED:
+            elif perf_status == lnt.server.reporting.analysis.REGRESSED:
                 bucket = perf_regressions
-            elif perf_status == runinfo.IMPROVED:
+            elif perf_status == lnt.server.reporting.analysis.IMPROVED:
                 bucket = perf_improvements
             else:
                 bucket = unchanged_tests

Modified: lnt/trunk/lnt/server/ui/templates/v4_run.html
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/templates/v4_run.html?rev=161337&r1=161336&r2=161337&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/templates/v4_run.html (original)
+++ lnt/trunk/lnt/server/ui/templates/v4_run.html Mon Aug  6 15:02:55 2012
@@ -30,11 +30,11 @@
 {% endif %}
 
 {% set cell_color = none %}
-{% if test_status == runinfo.REGRESSED %}
+{% if test_status == analysis.REGRESSED %}
     {% set cell_color = (233,128,128) %}
-{% elif test_status == runinfo.IMPROVED %}
+{% elif test_status == analysis.IMPROVED %}
     {% set cell_color = (143,223,95) %}
-{% elif test_status == runinfo.UNCHANGED_FAIL %}
+{% elif test_status == analysis.UNCHANGED_FAIL %}
     {% set cell_color = (255,195,67) %}
 {% endif %}
 
@@ -46,8 +46,8 @@
 {% endif %}
 
 {% if (options.show_all or
-       value_status == runinfo.REGRESSED or
-       value_status == runinfo.IMPROVED) %}
+       value_status == analysis.REGRESSED or
+       value_status == analysis.IMPROVED) %}
     {{ cr.pct_delta|aspctcell|safe }}
 {% else %}
     <td>-</td>

Modified: lnt/trunk/lnt/server/ui/views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/views.py?rev=161337&r1=161336&r2=161337&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/views.py (original)
+++ lnt/trunk/lnt/server/ui/views.py Mon Aug  6 15:02:55 2012
@@ -19,7 +19,7 @@
 import lnt.util.stats
 from lnt.server.ui.globals import db_url_for, v4_url_for
 import lnt.server.reporting.analysis
-from lnt.db import runinfo
+import lnt.server.reporting.runs
 from lnt.server.ui.decorators import frontend, db_route, v4_route
 
 ###
@@ -328,7 +328,7 @@
     return render_template(
         "v4_run.html", ts=ts, options=options,
         primary_fields=list(ts.Sample.get_primary_fields()),
-        test_info=test_info, runinfo=runinfo,
+        test_info=test_info, analysis=lnt.server.reporting.analysis,
         test_min_value_filter=test_min_value_filter,
         request_info=info)
 

Modified: lnt/trunk/lnt/util/ImportData.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/util/ImportData.py?rev=161337&r1=161336&r2=161337&view=diff
==============================================================================
--- lnt/trunk/lnt/util/ImportData.py (original)
+++ lnt/trunk/lnt/util/ImportData.py Mon Aug  6 15:02:55 2012
@@ -2,7 +2,8 @@
 
 import lnt.testing
 import lnt.formats
-import lnt.db.runinfo
+import lnt.server.reporting.analysis
+
 from lnt.util import NTEmailReport
 
 def import_and_report(config, db_name, db, file, format, commit=False,
@@ -222,20 +223,20 @@
             #
             # FIXME: Think longer about mapping to test codes.
             result_info = None
-            if test_status == lnt.db.runinfo.REGRESSED:
+            if test_status == lnt.server.reporting.analysis.REGRESSED:
                 result_string = 'FAIL'
-            elif test_status == lnt.db.runinfo.IMPROVED:
+            elif test_status == lnt.server.reporting.analysis.IMPROVED:
                 result_string = 'IMPROVED'
                 result_info = "Test started passing."
-            elif test_status == lnt.db.runinfo.UNCHANGED_FAIL:
+            elif test_status == lnt.server.reporting.analysis.UNCHANGED_FAIL:
                 result_string = 'XFAIL'
             elif perf_status == None:
                 # Missing perf status means test was just added or removed.
                 result_string = 'PASS'
-            elif perf_status == lnt.db.runinfo.REGRESSED:
+            elif perf_status == lnt.server.reporting.analysis.REGRESSED:
                 result_string = 'REGRESSED'
                 result_info = 'Performance regressed.'
-            elif perf_status == lnt.db.runinfo.IMPROVED:
+            elif perf_status == lnt.server.reporting.analysis.IMPROVED:
                 result_string = 'IMPROVED'
                 result_info = 'Performance improved.'
             else:





More information about the llvm-commits mailing list