[LNT] r252281 - Do not show samples in sparkline for failed results.

Kristof Beyls via llvm-commits <llvm-commits at lists.llvm.org>
Fri Nov 6 02:31:34 PST 2015


Author: kbeyls
Date: Fri Nov  6 04:31:34 2015
New Revision: 252281

URL: http://llvm.org/viewvc/llvm-project?rev=252281&view=rev
Log:
Do not show samples in sparkline for failed results.

Modified:
    lnt/trunk/lnt/server/reporting/dailyreport.py
    lnt/trunk/lnt/server/ui/templates/reporting/daily_report.html
    lnt/trunk/tests/server/ui/Inputs/V4Pages_extra_records.sql
    lnt/trunk/tests/server/ui/V4Pages.py
    lnt/trunk/tests/server/ui/test_api.py

Modified: lnt/trunk/lnt/server/reporting/dailyreport.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/dailyreport.py?rev=252281&r1=252280&r2=252281&view=diff
==============================================================================
--- lnt/trunk/lnt/server/reporting/dailyreport.py (original)
+++ lnt/trunk/lnt/server/reporting/dailyreport.py Fri Nov  6 04:31:34 2015
@@ -58,7 +58,7 @@ class DayResults:
         for dr in self.day_results:
             if dr is None:
                 continue
-            if dr.cr.samples is not None:
+            if dr.cr.samples is not None and not dr.cr.failed:
                 all_samples.extend(dr.cr.samples)
         if len(all_samples) > 0:
             self.min_sample = min(all_samples)
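
For context, a minimal standalone sketch of the aggregation this hunk changes
(not the DayResults class itself; the max handling is assumed to mirror the
min_sample line visible above): the pooled samples determine the sparkline's
y-axis range, and samples from failed comparison results no longer enter that
pool.

def sparkline_sample_range(day_results):
    """Pool per-day samples for the sparkline's y-axis range, now
    skipping missing days and failed comparison results."""
    all_samples = []
    for dr in day_results:
        if dr is None:
            continue
        # dr.cr is the day's comparison result; its samples are ignored
        # entirely once the result is marked as failed.
        if dr.cr.samples is not None and not dr.cr.failed:
            all_samples.extend(dr.cr.samples)
    if len(all_samples) > 0:
        return min(all_samples), max(all_samples)
    return None, None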

Modified: lnt/trunk/lnt/server/ui/templates/reporting/daily_report.html
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/templates/reporting/daily_report.html?rev=252281&r1=252280&r2=252281&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/templates/reporting/daily_report.html (original)
+++ lnt/trunk/lnt/server/ui/templates/reporting/daily_report.html Fri Nov  6 04:31:34 2015
@@ -159,24 +159,26 @@
 {#- Make y-axis go upwards instead of downwards: #}
       <g transform="translate(0, {{full_height}}) scale(1, -1) ">
 {%- for dr in day_results -%}
-  {%- set day_nr = loop.index %}
-  {%- set nr_samples_for_day = dr.samples|length %}
-  {%- for sample in dr.samples -%}
-    {# fuzz the x-coordinate slightly so that multiple samples with the same
-       value can be noticed #}
-    {%- set sample_fuzz = (-sample_fuzzing*1.25) +
-                     (2.0*sample_fuzzing/nr_samples_for_day) * loop.index %}
+  {%- if dr is not none and not dr.cr.failed -%}
+    {%- set day_nr = loop.index %}
+    {%- set nr_samples_for_day = dr.samples|length %}
+    {%- for sample in dr.samples -%}
+      {# fuzz the x-coordinate slightly so that multiple samples with the same
+         value can be noticed #}
+      {%- set sample_fuzz = (-sample_fuzzing*1.25) +
+                       (2.0*sample_fuzzing/nr_samples_for_day) * loop.index %}
         <circle cx="{{ spark_x_coord(day_nr)|float + sample_fuzz }}" {# -#}
                 cy="{{ spark_y_coord(day_nr, sample) }}" r="1"
                 stroke-width="1" stroke="black" fill="black" />
-  {%- endfor -%}
+    {%- endfor -%}
+  {%- endif -%}
 {%- endfor %}
         <polyline points="
   {%- for dr in day_results -%}
     {%- if dr is not none -%}
       {%- set cr = dr.cr -%}
       {%- set day_nr = loop.index -%}
-      {%- if cr.current is not none %}
+      {%- if not cr.failed and cr.current is not none %}
           {{ spark_x_coord(day_nr) }} {{ spark_y_coord(day_nr, cr.current) }}
       {%- endif -%}
     {%- endif -%}
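
The template change applies the same gate to both SVG layers: the per-sample
circle dots and the polyline through the current values. Expressed as a hedged
Python sketch (x-coordinate fuzzing and the actual coordinate mapping are
omitted), the filtering amounts to the following.

def sparkline_geometry(day_results):
    """Collect (day_nr, value) pairs for the sample dots and for the
    polyline through the current values, skipping failed days."""
    circles, polyline = [], []
    for day_nr, dr in enumerate(day_results, start=1):
        if dr is None:
            continue
        cr = dr.cr
        if cr.failed:
            continue
        # One dot per sample of the day (the template additionally
        # fuzzes the x-coordinate so overlapping samples stay visible).
        circles.extend((day_nr, sample) for sample in dr.samples)
        # The polyline only passes through days with a current value.
        if cr.current is not None:
            polyline.append((day_nr, cr.current))
    return circles, polyline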

Modified: lnt/trunk/tests/server/ui/Inputs/V4Pages_extra_records.sql
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/server/ui/Inputs/V4Pages_extra_records.sql?rev=252281&r1=252280&r2=252281&view=diff
==============================================================================
--- lnt/trunk/tests/server/ui/Inputs/V4Pages_extra_records.sql (original)
+++ lnt/trunk/tests/server/ui/Inputs/V4Pages_extra_records.sql Fri Nov  6 04:31:34 2015
@@ -3,6 +3,7 @@ INSERT INTO "NT_Test" ("Name")
  VALUES('SingleSource/UnitTests/ObjC/block-byref-aggr'); -- ID 3 (was 87)
 INSERT INTO "compile_Test" ("Name")
  VALUES('compile/403.gcc/combine.c/init/(-O0)'); -- ID 3 (was 38)
+ 
 -- make sure there are 3 machines - to test ?filter-machine-regex= on daily_report page
 INSERT INTO "NT_Machine" ("Name", "Parameters", "hardware", "os")
  VALUES('machine2','[]','AArch64','linux'); -- ID 2
@@ -30,9 +31,10 @@ INSERT INTO "NT_Sample" ("RunID", "TestI
                          "execution_status", "compile_time", "execution_time",
                          "score", "mem_bytes")
  VALUES(4,1,NULL,NULL,0.001,0.0001,NULL,NULL); -- ID 4
+ 
 -- check that a regression on consecutive runs more than 1 day apart can be detected:
-INSERT INTO "NT_Test" VALUES(88,'test1');
-INSERT INTO "NT_Test" VALUES(89,'test2');
+INSERT INTO "NT_Test" VALUES(4,'test1'); -- ID 4
+INSERT INTO "NT_Test" VALUES(5,'test2'); -- ID 5
 INSERT INTO "NT_Order" ("NextOrder", "PreviousOrder", "llvm_project_revision")
  VALUES(NULL,NULL,'152292'); -- ID 5
 INSERT INTO "NT_Run" ("MachineID", "OrderID", "ImportedFrom", "StartTime",
@@ -42,11 +44,11 @@ INSERT INTO "NT_Run" ("MachineID", "Orde
 INSERT INTO "NT_Sample" ("RunID", "TestID", "compile_status",
                          "execution_status", "compile_time", "execution_time",
                          "score", "mem_bytes")
- VALUES(5,88,0,0,0.001,1.0,NULL,NULL); -- ID 5: passing result
+ VALUES(5,4,0,0,0.001,1.0,NULL,NULL); -- ID 5: passing result
 INSERT INTO "NT_Sample" ("RunID", "TestID", "compile_status",
                          "execution_status", "compile_time", "execution_time",
                          "score", "mem_bytes")
- VALUES(5,89,0,1,0.001,1.0,NULL,NULL); -- ID 6: failing result
+ VALUES(5,5,0,1,0.001,1.0,NULL,NULL); -- ID 6: failing result
 INSERT INTO "NT_Order" ("NextOrder", "PreviousOrder", "llvm_project_revision")
  VALUES(5,NULL,'152293'); -- ID 6
 UPDATE "NT_Order" SET "PreviousOrder" = 6 WHERE "ID" = 5;
@@ -57,10 +59,45 @@ INSERT INTO "NT_Run" ("MachineID", "Orde
 INSERT INTO "NT_Sample" ("RunID", "TestID", "compile_status",
                          "execution_status", "compile_time", "execution_time",
                          "score", "mem_bytes")
- VALUES(6,88,0,0,0.001,10.0,NULL,NULL); -- ID 7: passing result 10x slower
+ VALUES(6,4,0,0,0.001,10.0,NULL,NULL); -- ID 7: passing result 10x slower
 INSERT INTO "NT_Sample" ("RunID", "TestID", "compile_status",
                          "execution_status", "compile_time", "execution_time",
                          "score", "mem_bytes")
- VALUES(5,89,0,0,0.001,1.0,NULL,NULL); -- ID 8: passing result
+ VALUES(5,5,0,0,0.001,1.0,NULL,NULL); -- ID 8: passing result
+
+-- check that a failing test result does not show up in the sparkline
+INSERT INTO "NT_Test" VALUES(6,'test6'); -- ID 6
+INSERT INTO "NT_Order" ("NextOrder", "PreviousOrder", "llvm_project_revision")
+ VALUES(NULL,NULL,'152294'); -- ID 6
+INSERT INTO "NT_Order" ("NextOrder", "PreviousOrder", "llvm_project_revision")
+ VALUES(NULL,NULL,'152295'); -- ID 7
+INSERT INTO "NT_Order" ("NextOrder", "PreviousOrder", "llvm_project_revision")
+ VALUES(NULL,NULL,'152296'); -- ID 8
+INSERT INTO "NT_Run" ("MachineID", "OrderID", "ImportedFrom", "StartTime",
+                      "EndTime", "SimpleRunID", "Parameters")
+ VALUES(2,6,'run7.json','2012-05-10 16:28:23.000000',
+        '2012-05-10 16:28:58.000000',NULL,'[]'); -- ID 7
+INSERT INTO "NT_Run" ("MachineID", "OrderID", "ImportedFrom", "StartTime",
+                      "EndTime", "SimpleRunID", "Parameters")
+ VALUES(2,7,'run8.json','2012-05-11 16:28:23.000000',
+        '2012-05-11 16:28:58.000000',NULL,'[]'); -- ID 8
+INSERT INTO "NT_Run" ("MachineID", "OrderID", "ImportedFrom", "StartTime",
+                      "EndTime", "SimpleRunID", "Parameters")
+ VALUES(2,8,'run9.json','2012-05-12 16:28:23.000000',
+        '2012-05-12 16:28:58.000000',NULL,'[]'); -- ID 9
+INSERT INTO "NT_Sample" ("RunID", "TestID", "compile_status",
+                         "execution_status", "compile_time", "execution_time",
+                         "score", "mem_bytes")
+ VALUES(7,6,0,0,0.001,1.0,NULL,NULL); -- ID 9: passing result
+INSERT INTO "NT_Sample" ("RunID", "TestID", "compile_status",
+                         "execution_status", "compile_time", "execution_time",
+                         "score", "mem_bytes")
+ VALUES(8,6,0,1,0.001,1.0,NULL,NULL); -- ID 10: failing result
+INSERT INTO "NT_Sample" ("RunID", "TestID", "compile_status",
+                         "execution_status", "compile_time", "execution_time",
+                         "score", "mem_bytes")
+ VALUES(9,6,0,0,0.001,1.2,NULL,NULL); -- ID 11: passing result; 20% bigger,
+                                      -- so shown in daily report page.
+
 
 COMMIT;
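
The new fixture rows set up a pass/fail/pass sequence for test6 on machine2
across three consecutive days, so a daily report for 2012-05-13 with
num_days=3 covers exactly these runs. A small Python summary of the intended
timeline:

# (run_id, start_day, execution_status, execution_time), as commented
# in the SQL above; how the report buckets them into days is assumed.
fixture_timeline = [
    (7, "2012-05-10", 0, 1.0),  # sample 9: passing
    (8, "2012-05-11", 1, 1.0),  # sample 10: failing (non-zero status)
    (9, "2012-05-12", 0, 1.2),  # sample 11: passing, 20% slower
]
# Only the two passing days should contribute dots to the sparkline.
expected_dots = sum(1 for _, _, status, _ in fixture_timeline if status == 0)
assert expected_dots == 2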

Modified: lnt/trunk/tests/server/ui/V4Pages.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/server/ui/V4Pages.py?rev=252281&r1=252280&r2=252281&view=diff
==============================================================================
--- lnt/trunk/tests/server/ui/V4Pages.py (original)
+++ lnt/trunk/tests/server/ui/V4Pages.py Fri Nov  6 04:31:34 2015
@@ -103,8 +103,7 @@ def convert_html_to_text(element):
     return ("".join(element.itertext()))
 
 
-def check_body_result_table(client, url, fieldname,
-                            expected_table_body_content):
+def get_results_table(client, url, fieldname):
     resp = check_code(client, url)
     html = resp.data
     tree = get_xml_tree(html)
@@ -112,6 +111,12 @@ def check_body_result_table(client, url,
     table = find_table_with_heading(tree, table_header)
     assert table is not None, \
         "Couldn't find table with header '%s'" % table_header
+    return table
+
+
+def check_body_result_table(client, url, fieldname,
+                            expected_table_body_content):
+    table = get_results_table(client, url, fieldname)
     body_content = [[convert_html_to_text(cell).strip()
                      for cell in row.findall("./td")]
                     for row in table.findall("./tr")]
@@ -120,8 +125,36 @@ def check_body_result_table(client, url,
         (expected_table_body_content, body_content)
 
 
+def get_sparkline(client, url, fieldname, testname, machinename):
+    table = get_results_table(client, url, fieldname)
+    body_content = [[cell
+                     for cell in row.findall("./td")]
+                    for row in table.findall("./tr")]
+    txt_body_content = [[convert_html_to_text(cell).strip()
+                         for cell in row.findall("./td")]
+                        for row in table.findall("./tr")]
+    cur_test_name = ""
+    for rownr, row_content in enumerate(txt_body_content):
+        nr_columns = len(row_content)
+        for colnr, col_content in enumerate(row_content):
+            if colnr == 0 and col_content != "":
+                cur_test_name = col_content
+            if colnr == 1 and col_content != "":
+                cur_machine_name = machinename
+                if (cur_machine_name, cur_test_name) == \
+                   (machinename, testname):
+                    return body_content[rownr][-1]
+    return None
+
+
+def extract_sample_points(sparkline_svg):
+    # assume all svg:circle elements are exactly all the sample points
+    samples = sparkline_svg.findall(".//circle")
+    return samples
+
+
 def main():
-    _,instance_path = sys.argv
+    _, instance_path = sys.argv
 
     # Create the application instance.
     app = lnt.server.ui.app.App.create_standalone(instance_path)
@@ -234,6 +267,20 @@ def main():
                              ["test2", ""],
                              ["", "machine2", "FAIL", "-", "PASS", ""]])
 
+    # Check that a failing result does not show up in the spark line
+    # as a dot with value 0.
+    check_body_result_table(client,
+                            '/v4/nts/daily_report/2012/5/13?num_days=3',
+                            "execution_time",
+                            [["test6", ""],
+                             ["", "machine2", "1.0000", "FAIL", "PASS", ""]])
+    sparkline_xml = get_sparkline(client,
+                                  '/v4/nts/daily_report/2012/5/13?num_days=3',
+                                  "execution_time", "test6", "machine2")
+    nr_sample_points = len(extract_sample_points(sparkline_xml))
+    assert 2 == nr_sample_points, \
+        "Expected 2 sample points, found %d" % nr_sample_points
+
     # Now check the compile report
     # Get the V4 overview page.
     check_code(client, '/v4/compile/')
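
As a usage sketch of the new helpers (a hypothetical wrapper, not part of the
patch):

def count_sparkline_dots(client):
    # URL, field, test and machine names come from the test above;
    # get_sparkline/extract_sample_points are the helpers added in
    # this diff and are assumed to be in scope.
    url = '/v4/nts/daily_report/2012/5/13?num_days=3'
    cell = get_sparkline(client, url, "execution_time",
                         "test6", "machine2")
    if cell is None:
        return 0
    # Each plotted sample is an svg:circle inside the sparkline cell.
    return len(extract_sample_points(cell))

With the pass/fail/pass fixture above, this should return 2, since the failing
middle day no longer contributes a dot.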

Modified: lnt/trunk/tests/server/ui/test_api.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/server/ui/test_api.py?rev=252281&r1=252280&r2=252281&view=diff
==============================================================================
--- lnt/trunk/tests/server/ui/test_api.py (original)
+++ lnt/trunk/tests/server/ui/test_api.py Fri Nov  6 04:31:34 2015
@@ -39,7 +39,10 @@ machine_expected_response[0][u'runs'] =
 machine_expected_response[1] = machines_expected_response[1].copy()
 machine_expected_response[1][u'runs'] = [u'/api/db_default/v4/nts/run/3',
                                          u'/api/db_default/v4/nts/run/5',
-                                         u'/api/db_default/v4/nts/run/6']
+                                         u'/api/db_default/v4/nts/run/6',
+                                         u'/api/db_default/v4/nts/run/7',
+                                         u'/api/db_default/v4/nts/run/8',
+                                         u'/api/db_default/v4/nts/run/9']
 
 machine_expected_response[2] = machines_expected_response[2].copy()
 machine_expected_response[2][u'runs'] = [u'/api/db_default/v4/nts/run/4']
@@ -105,7 +108,7 @@ class JSONAPITester(unittest.TestCase):
     def test_graph_api(self):
         """Check that /graph/x/y/z returns what we expect."""
         client = self.client
-        j = check_json(client, 'api/db_default/v4/nts/graph/2/88/3')
+        j = check_json(client, 'api/db_default/v4/nts/graph/2/4/3')
         self.assertEqual(graph_data, j)
 
 if __name__ == '__main__':
