[llvm-commits] [LNT] r163767 - in /lnt/trunk/lnt/server: reporting/dailyreport.py ui/templates/v4_daily_report.html ui/views.py
Daniel Dunbar
daniel at zuster.org
Wed Sep 12 18:39:30 PDT 2012
Author: ddunbar
Date: Wed Sep 12 20:39:30 2012
New Revision: 163767
URL: http://llvm.org/viewvc/llvm-project?rev=163767&view=rev
Log:
[dailyreport] Factor report out into its own module.
Added:
    lnt/trunk/lnt/server/reporting/dailyreport.py

Modified:
    lnt/trunk/lnt/server/ui/templates/v4_daily_report.html
    lnt/trunk/lnt/server/ui/views.py
Added: lnt/trunk/lnt/server/reporting/dailyreport.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/dailyreport.py?rev=163767&view=auto
==============================================================================
--- lnt/trunk/lnt/server/reporting/dailyreport.py (added)
+++ lnt/trunk/lnt/server/reporting/dailyreport.py Wed Sep 12 20:39:30 2012
@@ -0,0 +1,145 @@
+import datetime
+
+from lnt.server.ui import util
+
+class DailyReport(object):
+    def __init__(self, ts, year, month, day, num_prior_days_to_include = 3):
+        self.ts = ts
+        self.num_prior_days_to_include = num_prior_days_to_include
+        self.year = year
+        self.month = month
+        self.day = day
+
+        # Computed values.
+        self.day_start_offset = None
+        self.next_day = None
+        self.prior_days = None
+        self.reporting_machines = None
+        self.reporting_tests = None
+        self.result_table = None
+
+    def build(self):
+        ts = self.ts
+
+        # Construct datetime instances for the report range.
+        day_ordinal = datetime.datetime(self.year, self.month,
+                                        self.day).toordinal()
+
+        # Adjust the dates' time component. As we typically want to do runs
+        # overnight, we define "daily" to really mean "at 0700".
+        self.day_start_offset = datetime.timedelta(hours=7)
+
+        self.next_day = (datetime.datetime.fromordinal(day_ordinal + 1) +
+                         self.day_start_offset)
+        self.prior_days = [(datetime.datetime.fromordinal(day_ordinal - i) +
+                            self.day_start_offset)
+                           for i in range(self.num_prior_days_to_include + 1)]
+
+        # Find all the runs that occurred for each day slice.
+        prior_runs = [ts.query(ts.Run).\
+                          filter(ts.Run.start_time > prior_day).\
+                          filter(ts.Run.start_time <= day).all()
+                      for day,prior_day in util.pairs(self.prior_days)]
+
+        # For every machine, we only want to report on the last run order that
+        # was reported for that machine for the particular day range.
+        #
+        # Note that this *does not* mean that we will only report for one
+        # particular run order for each day, because different machines may
+        # report on different orders.
+        #
+        # However, we want to limit ourselves to a single run order for each
+        # (day,machine) so that we don't obscure any details through our
+        # aggregation.
+        self.prior_days_machine_order_map = \
+            [None] * self.num_prior_days_to_include
+        for i,runs in enumerate(prior_runs):
+            # Aggregate the runs by machine.
+            machine_to_all_orders = util.multidict()
+            for r in runs:
+                machine_to_all_orders[r.machine] = r.order
+
+            # Create a map from machine to max order.
+            self.prior_days_machine_order_map[i] = machine_order_map = dict(
+                (machine, max(orders))
+                for machine,orders in machine_to_all_orders.items())
+
+            # Update the run list to only include the runs with that order.
+            prior_runs[i] = [r for r in runs
+                             if r.order is machine_order_map[r.machine]]
+
+        # Form a list of all relevant runs.
+        relevant_runs = sum(prior_runs, [])
+
+        # Find the union of all machines reporting in the relevant runs.
+        self.reporting_machines = list(set(r.machine for r in relevant_runs))
+        self.reporting_machines.sort(key = lambda m: m.name)
+
+        # We aspire to present a "lossless" report, in that we don't ever hide
+        # any possible change due to aggregation. In addition, we want to make
+        # it easy to see the relation of results across all the reporting
+        # machines. In particular:
+        #
+        #   (a) When a test starts failing or passing on one machine, it should
+        #       be easy to see how that test behaved on other machines. This
+        #       makes it easy to identify the scope of the change.
+        #
+        #   (b) When a performance change occurs, it should be easy to see the
+        #       performance of that test on other machines. This makes it easy
+        #       to see the scope of the change and to potentially apply human
+        #       discretion in determining whether or not a particular result is
+        #       worth considering (as opposed to noise).
+        #
+        # The idea is as follows: for each (machine, test, primary_field),
+        # classify the result into one of REGRESSED, IMPROVED, UNCHANGED_FAIL,
+        # ADDED, REMOVED, PERFORMANCE_REGRESSED, PERFORMANCE_IMPROVED.
+        #
+        # For now, we then just aggregate by test and present the results as
+        # is. This is lossless, but not nearly as nice to read as the old style
+        # per-machine reports. In the future we will want to find a way to
+        # combine the per-machine report style of presenting results aggregated
+        # by the kind of status change, while still managing to present the
+        # overview across machines.
+
+        # Batch load all of the samples reported by all these runs.
+        columns = [ts.Sample.run_id,
+                   ts.Sample.test_id]
+        columns.extend(f.column
+                       for f in ts.sample_fields)
+        samples = ts.query(*columns).\
+            filter(ts.Sample.run_id.in_(
+                r.id for r in relevant_runs)).all()
+
+        # Find the union of tests reported in the relevant runs.
+        #
+        # FIXME: This is not particularly efficient; should we just use all
+        # tests in the database?
+        self.reporting_tests = ts.query(ts.Test).\
+            filter(ts.Test.id.in_(set(s[1] for s in samples))).\
+            order_by(ts.Test.name).all()
+
+        # Aggregate all of the samples by (run_id, test_id).
+        sample_map = util.multidict()
+        for s in samples:
+            sample_map[(s[0], s[1])] = s[2:]
+
+        # Build the result table:
+        #   result_table[test_index][day_index][machine_index] = {samples}
+        self.result_table = []
+        for test in self.reporting_tests:
+            key = test
+            test_results = []
+            for day_runs in prior_runs:
+                day_results = []
+                for machine in self.reporting_machines:
+                    # Collect all the results for this machine.
+                    results = [s
+                               for run in day_runs
+                               if run.machine is machine
+                               for s in sample_map.get((run.id, test.id), ())]
+                    day_results.append(results)
+                test_results.append(day_results)
+            self.result_table.append(test_results)
+
+        # FIXME: Now compute ComparisonResult objects for each (test, machine,
+        # day).
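
To make the new module's aggregation easier to follow, here is a minimal
standalone sketch of the day slicing and per-machine order selection, using
plain tuples in place of the ORM objects. The pairs() helper below is an
assumption about lnt.server.ui.util based on how it is called above (yielding
adjacent (day, prior_day) pairs), and the runs and orders are hypothetical:

    import datetime

    def pairs(items):
        # Assumed behavior of util.pairs: adjacent (current, previous) pairs.
        return zip(items[:-1], items[1:])

    # Day windows: index 0 is the report day, higher indices go back in time.
    # Each "day" runs from 07:00 to 07:00 the next morning.
    day_start_offset = datetime.timedelta(hours=7)
    day_ordinal = datetime.datetime(2012, 9, 12).toordinal()
    prior_days = [datetime.datetime.fromordinal(day_ordinal - i) +
                  day_start_offset
                  for i in range(3 + 1)]

    # Hypothetical (machine, start_time, order) rows standing in for ts.Run.
    runs = [("machine-a", datetime.datetime(2012, 9, 12, 3, 0), 163700),
            ("machine-a", datetime.datetime(2012, 9, 12, 5, 0), 163750),
            ("machine-b", datetime.datetime(2012, 9, 11, 22, 0), 163720)]

    for day, prior_day in pairs(prior_days):
        day_runs = [r for r in runs if prior_day < r[1] <= day]

        # Keep only each machine's maximum reported order for this window,
        # so a single (day, machine) cell never mixes run orders.
        machine_order_map = {}
        for machine, _, order in day_runs:
            machine_order_map[machine] = max(
                machine_order_map.get(machine, order), order)
        day_runs = [r for r in day_runs if r[2] == machine_order_map[r[0]]]

        print(day.date(), [(r[0], r[2]) for r in day_runs])

Running this keeps only the order-163750 run for machine-a in the report-day
window, which is exactly the "last run order per (day, machine)" rule the
comments describe.
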
Modified: lnt/trunk/lnt/server/ui/templates/v4_daily_report.html
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/templates/v4_daily_report.html?rev=163767&r1=163766&r2=163767&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/templates/v4_daily_report.html (original)
+++ lnt/trunk/lnt/server/ui/templates/v4_daily_report.html Wed Sep 12 20:39:30 2012
@@ -7,19 +7,20 @@
 <center><h2>{#
 #}(<a href="{{v4_url_for('v4_daily_report',
-              year=prior_days[1].year,
-              month=prior_days[1].month,
-              day=prior_days[1].day)}}">prev</a>){#
+              year=report.prior_days[1].year,
+              month=report.prior_days[1].month,
+              day=report.prior_days[1].day)}}">prev</a>){#
 #}<b> Daily Overview {{ '%04d-%02d-%02d' % (
-  prior_days[0].year, prior_days[0].month, prior_days[0].day) }} </b>{#
+  report.prior_days[0].year, report.prior_days[0].month,
+  report.prior_days[0].day) }} </b>{#
 #}(<a href="{{v4_url_for('v4_daily_report',
-              year=next_day.year,
-              month=next_day.month,
-              day=next_day.day)}}">next</a>){#
+              year=report.next_day.year,
+              month=report.next_day.month,
+              day=report.next_day.day)}}">next</a>){#
 #}</h2>

 (day start is considered to be at +{{ "%02d:%02d" % (
-    (day_start_offset.seconds // 3600),
-    (day_start_offset.seconds // 60) % 60,)}})
+    (report.day_start_offset.seconds // 3600),
+    (report.day_start_offset.seconds // 60) % 60,)}})
 </center>

 {# Generate the table showing which run orders we are reporting on, for each
@@ -29,16 +30,16 @@
 <thead>
   <tr>
     <th>Machine Name</th>
-{% for i in range(num_prior_days_to_include)|reverse %}
+{% for i in range(report.num_prior_days_to_include)|reverse %}
     <th>Day - {{i}}</th>
 {% endfor %}
   </tr>
 </thead>
-{% for machine in reporting_machines %}
+{% for machine in report.reporting_machines %}
 <tr>
   <td>{{machine.name}}</td>
-{% for i in range(num_prior_days_to_include)|reverse %}
-{% set order = prior_days_machine_order_map[i].get(machine) %}
+{% for i in range(report.num_prior_days_to_include)|reverse %}
+{% set order = report.prior_days_machine_order_map[i].get(machine) %}
 {% if order %}
 {# FIXME: Don't hard code field name. #}
     <td>{{order.llvm_project_revision}}</td>
@@ -57,16 +58,16 @@
 <tr>
   <th>Test Name</th>
   <th>Machine Name</th>
-{% for i in range(num_prior_days_to_include)|reverse %}
+{% for i in range(report.num_prior_days_to_include)|reverse %}
   <th>Day - {{i}}</th>
 {% endfor %}
 </thead>
-{% for test,test_results in zip(reporting_tests, result_table) %}
+{% for test,test_results in zip(report.reporting_tests, report.result_table) %}
 <tr>
   <td colspan="2"><b>{{test.name}}</b></td>
-  <td colspan="{{num_prior_days_to_include}}"> </td>
+  <td colspan="{{report.num_prior_days_to_include}}"> </td>
 </tr>
-{% for machine in reporting_machines %}
+{% for machine in report.reporting_machines %}
 {% set machine_loop = loop %}
 <tr>
   <td> </td>
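
One subtlety in this template: the result-table loop calls zip(), which is not
a Jinja2 builtin, so it only works because the function is exposed to the
template environment somewhere in LNT's app setup. A minimal sketch of the
kind of wiring this relies on (the exact call site in LNT is an assumption):

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("lnt/server/ui/templates"))
    # zip() must be injected into the globals; otherwise the
    # {% for test,test_results in zip(...) %} loop above fails with an
    # undefined-name error at render time.
    env.globals.update(zip=zip)
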
Modified: lnt/trunk/lnt/server/ui/views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/views.py?rev=163767&r1=163766&r2=163767&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/views.py (original)
+++ lnt/trunk/lnt/server/ui/views.py Wed Sep 12 20:39:30 2012
@@ -24,6 +24,8 @@
 import lnt.server.reporting.runs
 from lnt.server.ui.decorators import frontend, db_route, v4_route
 import lnt.server.ui.util
+import lnt.server.reporting.dailyreport
+import lnt.server.reporting.summaryreport

 ###
 # Root-Only Routes
@@ -683,26 +685,6 @@
                            overview_plots=overview_plots, legend=legend,
                            use_day_axis=use_day_axis)

-@v4_route("/daily_report")
-def v4_daily_report_overview():
-    # For now, redirect to the report for the most recent submitted run's date.
-
-    ts = request.get_testsuite()
-
-    # Get the latest run.
-    latest = ts.query(ts.Run).\
-        order_by(ts.Run.start_time.desc()).limit(1).first()
-
-    # If we found a run, use it's start time.
-    if latest:
-        date = latest.start_time
-    else:
-        # Otherwise, just use today.
-        date = datetime.date.today()
-
-    return redirect(v4_url_for("v4_daily_report",
-                               year=date.year, month=date.month, day=date.day))
-
 @v4_route("/global_status")
 def v4_global_status():
     from lnt.server.ui import util
@@ -801,150 +783,41 @@
                            selected_field=field,
                            selected_revision=revision)

-@v4_route("/daily_report/<int:year>/<int:month>/<int:day>")
-def v4_daily_report(year, month, day):
-    import datetime
-    from lnt.server.ui import util
+@v4_route("/daily_report")
+def v4_daily_report_overview():
+    # Redirect to the report for the most recent submitted run's date.

     ts = request.get_testsuite()

-    # The number of previous days we are going to report on.
-    num_prior_days_to_include = 3
-
-    # Construct datetime instances for the report range.
-    day_ordinal = datetime.datetime(year, month, day).toordinal()
+    # Get the latest run.
+    latest = ts.query(ts.Run).\
+        order_by(ts.Run.start_time.desc()).limit(1).first()

-    next_day = datetime.datetime.fromordinal(day_ordinal + 1)
-    prior_days = [datetime.datetime.fromordinal(day_ordinal - i)
-                  for i in range(num_prior_days_to_include + 1)]
-
-    # Adjust the dates time component. As we typically want to do runs
-    # overnight, we define "daily" to really mean "at 0700".
-    day_start_offset = datetime.timedelta(hours=7)
-    next_day += day_start_offset
-    for i,day in enumerate(prior_days):
-        prior_days[i] = day + day_start_offset
-
-    # Find all the runs that occurred for each day slice.
-    prior_runs = [ts.query(ts.Run).\
-                      filter(ts.Run.start_time > prior_day).\
-                      filter(ts.Run.start_time <= day).all()
-                  for day,prior_day in util.pairs(prior_days)]
+    # If we found a run, use its start time.
+    if latest:
+        date = latest.start_time
+    else:
+        # Otherwise, just use today.
+        date = datetime.date.today()

-    # For every machine, we only want to report on the last run order that was
-    # reported for that machine for the particular day range.
-    #
-    # Note that this *does not* mean that we will only report for one particular
-    # run order for each day, because different machines may report on different
-    # orders.
-    #
-    # However, we want to limit ourselves to a single run order for each
-    # (day,machine) so that we don't obscure any details through our
-    # aggregation.
-    prior_days_machine_order_map = [None] * num_prior_days_to_include
-    for i,runs in enumerate(prior_runs):
-        # Aggregate the runs by machine.
-        machine_to_all_orders = util.multidict()
-        for r in runs:
-            machine_to_all_orders[r.machine] = r.order
-
-        # Create a map from machine to max order.
-        prior_days_machine_order_map[i] = machine_order_map = dict(
-            (machine, max(orders))
-            for machine,orders in machine_to_all_orders.items())
-
-        # Update the run list to only include the runs with that order.
-        prior_runs[i] = [r for r in runs
-                         if r.order is machine_order_map[r.machine]]
-
-    # Form a list of all relevant runs.
-    relevant_runs = sum(prior_runs, [])
-
-    # Find the union of all machines reporting in the relevant runs.
-    reporting_machines = list(set(r.machine for r in relevant_runs))
-    reporting_machines.sort(key = lambda m: m.name)
-
-    # We aspire to present a "lossless" report, in that we don't ever hide any
-    # possible change due to aggregation. In addition, we want to make it easy
-    # to see the relation of results across all the reporting machines. In
-    # particular:
-    #
-    #   (a) When a test starts failing or passing on one machine, it should be
-    #       easy to see how that test behaved on other machines. This makes it
-    #       easy to identify the scope of the change.
-    #
-    #   (b) When a performance change occurs, it should be easy to see the
-    #       performance of that test on other machines. This makes it easy to
-    #       see the scope of the change and to potentially apply human
-    #       discretion in determining whether or not a particular result is
-    #       worth considering (as opposed to noise).
-    #
-    # The idea is as follows, for each (machine, test, primary_field), classify
-    # the result into one of REGRESSED, IMPROVED, UNCHANGED_FAIL, ADDED,
-    # REMOVED, PERFORMANCE_REGRESSED, PERFORMANCE_IMPROVED.
-    #
-    # For now, we then just aggregate by test and present the results as
-    # is. This is lossless, but not nearly as nice to read as the old style
-    # per-machine reports. In the future we will want to find a way to combine
-    # the per-machine report style of presenting results aggregated by the kind
-    # of status change, while still managing to present the overview across
-    # machines.
-
-    # Batch load all of the samples reported by all these runs.
-    columns = [ts.Sample.run_id,
-               ts.Sample.test_id]
-    columns.extend(f.column
-                   for f in ts.sample_fields)
-    samples = ts.query(*columns).\
-        filter(ts.Sample.run_id.in_(
-            r.id for r in relevant_runs)).all()
+    return redirect(v4_url_for("v4_daily_report",
+                               year=date.year, month=date.month, day=date.day))

-    # Find the union of tests reported in the relevant runs.
-    #
-    # FIXME: This is not particularly efficient, should we just use all tests in
-    # the database?
-    reporting_tests = ts.query(ts.Test).\
-        filter(ts.Test.id.in_(set(s[1] for s in samples))).\
-        order_by(ts.Test.name).all()
+@v4_route("/daily_report/<int:year>/<int:month>/<int:day>")
+def v4_daily_report(year, month, day):
+    ts = request.get_testsuite()

-    # Aggregate all of the samples by (run_id, test_id).
-    sample_map = util.multidict()
-    for s in samples:
-        sample_map[(s[0], s[1])] = s[2:]
-
-    # Build the result table:
-    #   result_table[test_index][day_index][machine_index] = {samples}
-    result_table = []
-    for test in reporting_tests:
-        key = test
-        test_results = []
-        for day_runs in prior_runs:
-            day_results = []
-            for machine in reporting_machines:
-                # Collect all the results for this machine.
-                results = [s
-                           for run in day_runs
-                           if run.machine is machine
-                           for s in sample_map.get((run.id, test.id), ())]
-                day_results.append(results)
-            test_results.append(day_results)
-        result_table.append(test_results)
+    # Create the report object.
+    report = lnt.server.reporting.dailyreport.DailyReport(ts, year, month, day)

-    # FIXME: Now compute ComparisonResult objects for each (test, machine, day).
+    # Build the report.
+    report.build()

-    return render_template(
-        "v4_daily_report.html", ts=ts, day_start_offset=day_start_offset,
-        num_prior_days_to_include=num_prior_days_to_include,
-        reporting_machines=reporting_machines, reporting_tests=reporting_tests,
-        prior_days=prior_days, next_day=next_day,
-        prior_days_machine_order_map=prior_days_machine_order_map,
-        result_table=result_table)
+    return render_template("v4_daily_report.html", ts=ts, report=report)

 ###
 # Cross Test-Suite V4 Views

-import lnt.server.reporting.summaryreport
-
 def get_summary_config_path():
     return os.path.join(current_app.old_config.tempDir,
                         'summary_report_config.json')
@@ -1001,8 +874,6 @@

 @db_route("/summary_report", only_v3=False)
 def v4_summary_report():
-    # FIXME: Add a UI for defining the report configuration.
-
     # Load the summary report configuration.
     config_path = get_summary_config_path()
     if not os.path.exists(config_path):
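
A side benefit of the refactoring is that the report can now be built and
consumed without going through the web view at all. A hedged sketch of what a
non-Flask caller might look like (daily_summary is a hypothetical helper; only
the DailyReport API shown in the diff above is assumed):

    import lnt.server.reporting.dailyreport

    def daily_summary(ts, year, month, day):
        # ts is a test-suite handle, obtained however the surrounding code
        # provides one (the view above uses request.get_testsuite()).
        report = lnt.server.reporting.dailyreport.DailyReport(ts, year,
                                                              month, day)
        report.build()

        # All of the state the template needs now lives on one object.
        return {"machines": [m.name for m in report.reporting_machines],
                "tests": [t.name for t in report.reporting_tests],
                "days": report.prior_days}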