[llvm-commits] [zorg] r109780 - in /zorg/trunk/lnt/lnt: db/runinfo.py util/NTEmailReport.py viewer/simple.ptl

Daniel Dunbar daniel at zuster.org
Thu Jul 29 11:15:08 PDT 2010


Author: ddunbar
Date: Thu Jul 29 13:15:08 2010
New Revision: 109780

URL: http://llvm.org/viewvc/llvm-project?rev=109780&view=rev
Log:
LNT/simple: Compute status kinds on a per-run basis instead of assuming the kind is consistent across the entire database, which was totally bogus.

Modified:
    zorg/trunk/lnt/lnt/db/runinfo.py
    zorg/trunk/lnt/lnt/util/NTEmailReport.py
    zorg/trunk/lnt/lnt/viewer/simple.ptl

Modified: zorg/trunk/lnt/lnt/db/runinfo.py
URL: http://llvm.org/viewvc/llvm-project/zorg/trunk/lnt/lnt/db/runinfo.py?rev=109780&r1=109779&r2=109780&view=diff
==============================================================================
--- zorg/trunk/lnt/lnt/db/runinfo.py (original)
+++ zorg/trunk/lnt/lnt/db/runinfo.py Thu Jul 29 13:15:08 2010
@@ -92,8 +92,31 @@
         self.sample_map = Util.multidict()
         self.loaded_samples = set()
 
-    def get_run_comparison_result(self, run, compare_to, test_name, pset,
-                                  comparison_window=[]):
+    def get_test_status_in_run(self, run, status_kind, test_name, pset):
+        if status_kind == False: # .success
+            status_name = test_name + '.success'
+            status_test_id = self.test_suite_summary.test_id_map.get(
+                (status_name, pset))
+            run_status = self.sample_map.get((run.id, status_test_id))
+            if run_status and int(run_status[0]) == 1:
+                return PASS
+            else:
+                return FAIL
+        else:
+            status_name = test_name + '.status'
+            status_test_id = self.test_suite_summary.test_id_map.get(
+                (status_name, pset))
+            run_status = self.sample_map.get((run.id, status_test_id))
+            if not run_status:
+                return PASS
+            else:
+                # FIXME: What to do about the multiple entries here. We could
+                # start by just treating non-matching samples as errors.
+                return int(run_status[0])
+
+    def get_run_comparison_result(self, run, run_status_kind,
+                                  compare_to, compare_to_status_kind,
+                                  test_name, pset, comparison_window=[]):
         # Get the test.
         test_id = self.test_suite_summary.test_id_map.get((test_name, pset))
         if test_id is None:
@@ -101,15 +124,6 @@
                                     pct_delta=None, stddev=None, MAD=None,
                                     cur_failed=None, prev_failed=None)
 
-        # Get the test status info.
-        status_info = self.test_suite_summary.test_status_map.get(test_name)
-        if status_info is not None:
-            status_name,status_kind = status_info
-            status_test_id = self.test_suite_summary.test_id_map.get(
-                (status_name, pset))
-        else:
-            status_test_id = status_kind = None
-
         # Load the sample data for the current and previous runs and the
         # comparison window.
         if compare_to is None:
@@ -129,25 +143,18 @@
         # Determine whether this (test,pset) passed or failed in the current and
         # previous runs.
         run_failed = prev_failed = False
-        if not status_test_id:
-            # Assume status kind is '.status' if missing, in which case no
-            # values indicates success.
-            run_failed = bool(run_values)
-            prev_failed = bool(prev_values)
-        else:
-            run_status = self.sample_map.get((run.id,status_test_id))
-            prev_status = self.sample_map.get((compare_id,status_test_id))
-
-            # FIXME: Support XFAILs better.
-            #
-            # FIXME: What to do about the multiple entries here. We could start
-            # by just treating non-matching samples as errors.
-            if status_kind == False: # .success style
-                run_failed = not run_status or not run_status[0]
-                prev_failed = not prev_status or not prev_status[0]
-            else:
-                run_failed = run_status and run_status[0] == FAIL
-                prev_failed = prev_status and prev_status[0] == FAIL
+        run_status = prev_status = None
+        run_status = self.get_test_status_in_run(
+            run, run_status_kind, test_name, pset)
+        if compare_to:
+            prev_status = self.get_test_status_in_run(
+                compare_to, compare_to_status_kind, test_name, pset)
+        else:
+            prev_status = None
+
+        # FIXME: Support XFAILs better.
+        run_failed = run_status == FAIL
+        prev_failed = prev_status == FAIL
 
         # Get the current and previous values.
         if run_values:

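For reference, a minimal standalone sketch of the two status encodings that the new get_test_status_in_run distinguishes. The helper name and the PASS/FAIL values below are placeholders for illustration, not the actual lnt.db.runinfo definitions:

# Placeholder constants; the real PASS/FAIL values live in lnt.db.runinfo.
PASS = 0
FAIL = 1

def status_from_samples(status_kind, samples):
    """Interpret the raw status samples for one (run, test, pset).

    status_kind == False means the suite reports '<test>.success' tests,
    where a sample of 1 marks success and anything else (including no
    sample at all) marks failure.  Any other kind means '<test>.status'
    tests, where no sample means success and a present sample is itself
    the status code.
    """
    if status_kind == False:  # '.success' style
        if samples and int(samples[0]) == 1:
            return PASS
        return FAIL
    # '.status' style
    if not samples:
        return PASS
    return int(samples[0])

# A '.success' suite with no recorded sample counts as a failure, while a
# '.status' suite with no recorded sample counts as a pass.
assert status_from_samples(False, []) == FAIL
assert status_from_samples(True, []) == PASS
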
Modified: zorg/trunk/lnt/lnt/util/NTEmailReport.py
URL: http://llvm.org/viewvc/llvm-project/zorg/trunk/lnt/lnt/util/NTEmailReport.py?rev=109780&r1=109779&r2=109780&view=diff
==============================================================================
--- zorg/trunk/lnt/lnt/util/NTEmailReport.py (original)
+++ zorg/trunk/lnt/lnt/util/NTEmailReport.py Thu Jul 29 13:15:08 2010
@@ -114,6 +114,14 @@
         # FIXME: Look for run across machine.
         compare_to = None
 
+    # Get the test status style used in each run.
+    run_status_kind = run_summary.get_run_status_kind(db, run.id)
+    if compare_to:
+        compare_to_status_kind = run_summary.get_run_status_kind(
+            db, compare_to.id)
+    else:
+        compare_to_status_kind = None
+
     # Get the list of tests we are interested in.
     interesting_runs = [run.id]
     if compare_to:
@@ -132,8 +140,9 @@
     num_total_tests = len(test_names) * len(ts_summary.parameter_sets)
     for name in test_names:
         for pset in ts_summary.parameter_sets:
-            cr = sri.get_run_comparison_result(run, compare_to, name, pset,
-                                               comparison_window)
+            cr = sri.get_run_comparison_result(
+                run, run_status_kind, compare_to, compare_to_status_kind,
+                name, pset, comparison_window)
             test_status = cr.get_test_status()
             perf_status = cr.get_value_status()
             if test_status == runinfo.REGRESSED:

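The caller-side pattern is the same in both the email report and the simple viewer: resolve the status kind once per run, then thread both kinds through every comparison. A self-contained sketch of that loop (the wrapper function and its parameter list are hypothetical; db, run_summary, sri, ts_summary and the run objects are assumed to be set up as in the surrounding code):

def compare_all_tests(db, sri, run_summary, ts_summary, test_names,
                      run, compare_to, comparison_window):
    # Status kinds are resolved per run rather than once per database, so
    # runs that use different status conventions still compare correctly.
    run_status_kind = run_summary.get_run_status_kind(db, run.id)
    if compare_to:
        compare_to_status_kind = run_summary.get_run_status_kind(
            db, compare_to.id)
    else:
        compare_to_status_kind = None

    results = {}
    for name in test_names:
        for pset in ts_summary.parameter_sets:
            results[(name, pset)] = sri.get_run_comparison_result(
                run, run_status_kind, compare_to, compare_to_status_kind,
                name, pset, comparison_window)
    return results
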
Modified: zorg/trunk/lnt/lnt/viewer/simple.ptl
URL: http://llvm.org/viewvc/llvm-project/zorg/trunk/lnt/lnt/viewer/simple.ptl?rev=109780&r1=109779&r2=109780&view=diff
==============================================================================
--- zorg/trunk/lnt/lnt/viewer/simple.ptl (original)
+++ zorg/trunk/lnt/lnt/viewer/simple.ptl Thu Jul 29 13:15:08 2010
@@ -435,6 +435,14 @@
         return text_report
 
     def _q_index_body [html] (self, db, run, run_summary, compare_to):
+        # Get the test status style used in each run.
+        run_status_kind = run_summary.get_run_status_kind(db, run.id)
+        if compare_to:
+            compare_to_status_kind = run_summary.get_run_status_kind(
+                db, compare_to.id)
+        else:
+            compare_to_status_kind = None
+
         # Load the test suite summary.
         ts_summary = perfdbsummary.get_simple_suite_summary(db, self.tag)
         sri = runinfo.SimpleRunInfo(db, ts_summary)
@@ -611,8 +619,9 @@
             <td><input type="checkbox" name="test.%s"></td>
             <td>%s</td>""" % (name, name)
             for pset in ts_summary.parameter_sets:
-                cr = sri.get_run_comparison_result(run, compare_to, name, pset,
-                                                   comparison_window)
+                cr = sri.get_run_comparison_result(
+                    run, run_status_kind, compare_to, compare_to_status_kind,
+                    name, pset, comparison_window)
                 get_cell_value(cr)
             """
           </tr>"""

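get_run_status_kind itself is not part of this patch. Purely as an illustration of what resolving the kind per run could mean, here is a hypothetical heuristic keyed off which status test names a run actually reports; the real implementation lives elsewhere in LNT and may differ:

def guess_run_status_kind(reported_test_names):
    # Hypothetical stand-in for run_summary.get_run_status_kind().  Per the
    # convention in runinfo.py above, False means '<test>.success' style
    # status tests; anything else means '<test>.status' style.
    if any(name.endswith('.success') for name in reported_test_names):
        return False
    return True

# Two runs in the same database can now resolve to different kinds.
assert guess_run_status_kind(['foo.exec', 'foo.success']) is False
assert guess_run_status_kind(['foo.exec', 'foo.status']) is True
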
More information about the llvm-commits mailing list