[LNT] r256297 - Add a manual regression creation link to the graphs
Chris Matthews via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 22 15:13:31 PST 2015
Author: cmatthews
Date: Tue Dec 22 17:13:30 2015
New Revision: 256297
URL: http://llvm.org/viewvc/llvm-project?rev=256297&view=rev
Log:
Add a manual regression creation link to the graphs
Sometimes a change has not been detected automatically, but it is
still nice to be able to create a regression for it anyway. To do
this we take the runID and URL, which are now exposed in the graphs,
and add a server endpoint that works much like the fieldchange
generation process. It will always produce a regression, though,
even if the changes are not that interesting.
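
As a rough sketch of the resulting flow (the 1/1/1/1 ids below come
from the check added in V4Pages.py and are otherwise illustrative),
the tooltip link points at the new endpoint and ends in a redirect to
the freshly created regression:

    GET /db_<db>/v4/<test-suite>/regressions/new_from_graph/<machine_id>/<test_id>/<field_index>/<run_id>
    e.g. GET /v4/nts/regressions/new_from_graph/1/1/1/1  ->  302 to /v4/nts/regressions/1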
Modified:
lnt/trunk/lnt/server/ui/regression_views.py
lnt/trunk/lnt/server/ui/static/lnt_graph.js
lnt/trunk/lnt/server/ui/templates/v4_regression_detail.html
lnt/trunk/lnt/server/ui/views.py
lnt/trunk/tests/server/ui/V4Pages.py
Modified: lnt/trunk/lnt/server/ui/regression_views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/regression_views.py?rev=256297&r1=256296&r2=256297&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/regression_views.py (original)
+++ lnt/trunk/lnt/server/ui/regression_views.py Tue Dec 22 17:13:30 2015
@@ -17,6 +17,7 @@ from lnt.server.ui.globals import db_url
from random import randint
from sqlalchemy import desc, asc
+import sqlalchemy
from lnt.server.ui.util import FLASH_DANGER, FLASH_INFO, FLASH_SUCCESS
from lnt.server.reporting.analysis import REGRESSED
from wtforms import SelectMultipleField, StringField, widgets, SelectField
@@ -135,12 +136,13 @@ def calc_impact(ts, fcs):
cr = PrecomputedCR(fc.old_value, fc.new_value, fc.field.bigger_is_better)
crs.append(cr)
if crs:
- olds = sum([x.previous for x in crs])
- news = sum([x.current for x in crs])
- new_cr = PrecomputedCR(olds, news, crs[0].bigger_is_better) # TODO both directions
- return new_cr
- else:
- return PrecomputedCR(1, 1, True)
+ olds = sum([x.previous for x in crs if x.previous])
+ news = sum([x.current for x in crs if x.current])
+ if olds and news:
+ new_cr = PrecomputedCR(olds, news, crs[0].bigger_is_better) # TODO both directions
+ return new_cr
+
+ return PrecomputedCR(1, 1, True)
class MergeRegressionForm(Form):
@@ -266,15 +268,7 @@ def v4_regression_detail(id):
regression_indicators = ts.query(ts.RegressionIndicator) \
.filter(ts.RegressionIndicator.regression_id == id) \
.all()
- # indicators = []
- # for regression in regression_indicators:
- # fc = regression.field_change
- # cr, key_run = get_cr_for_field_change(ts, fc)
- # latest_cr, _ = get_cr_for_field_change(ts, fc, current=True)
- # indicators.append(ChangeData(fc, cr, key_run, latest_cr))
-
-
- ######
+
crs = []
form.field_changes.choices = list()
@@ -301,3 +295,78 @@ def v4_hook():
ts = request.get_testsuite()
rule_hooks.post_submission_hooks(ts, 0)
abort(400)
+
+
+@v4_route("/regressions/new_from_graph/<int:machine_id>/<int:test_id>/<int:field_index>/<int:run_id>", methods=["GET"])
+def v4_make_regression(machine_id, test_id, field_index, run_id):
+ """This function is called to make a new regression from a graph data point.
+
+ It is not necessarily the case that there will be a real change there,
+ so we must create a regression, bypassing the normal analysis.
+
+ """
+ ts = request.get_testsuite()
+ field = ts.sample_fields[field_index]
+ new_regression_id = 0
+ run = ts.query(ts.Run).get(run_id)
+
+ runs = ts.query(ts.Run). \
+ filter(ts.Run.order_id == run.order_id). \
+ filter(ts.Run.machine_id == run.machine_id). \
+ all()
+
+ previous_runs = ts.get_previous_runs_on_machine(run, 1)
+
+ # Find our start/end order.
+ if previous_runs != []:
+ start_order = previous_runs[0].order
+ else:
+ start_order = run.order
+ end_order = run.order
+
+ # Load our run data for the creation of the new fieldchanges.
+ runs_to_load = [r.id for r in (runs + previous_runs)]
+
+ runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
+
+ result = runinfo.get_comparison_result(runs, previous_runs,
+ test_id, field)
+
+ # Try and find a matching FC and update, else create one.
+ f = None
+
+ try:
+ f = ts.query(ts.FieldChange) \
+ .filter(ts.FieldChange.start_order == start_order) \
+ .filter(ts.FieldChange.end_order == end_order) \
+ .filter(ts.FieldChange.test_id == test_id) \
+ .filter(ts.FieldChange.machine == run.machine) \
+ .filter(ts.FieldChange.field == field) \
+ .one()
+ except sqlalchemy.orm.exc.NoResultFound:
+ f = None
+
+ if not f:
+ test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
+ f = ts.FieldChange(start_order=start_order,
+ end_order=run.order,
+ machine=run.machine,
+ test=test,
+ field=field)
+ ts.add(f)
+ # Always update FCs with new values.
+ if f:
+ f.old_value = result.previous
+ f.new_value = result.current
+ f.run = run
+ ts.commit()
+
+ # Make new regressions.
+ regression = new_regression(ts, [f.id])
+ regression.state = RegressionState.ACTIVE
+
+ ts.commit()
+ note("Manually created new regressions: {}".format(regression.id))
+ flash("Created " + regression.title, FLASH_SUCCESS)
+
+ return redirect(v4_url_for("v4_regression_detail", id=regression.id))
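
The first hunk in this file also hardens calc_impact() against samples
with a missing baseline or current value. A minimal sketch of what the
guard does, using a hypothetical stand-in instead of the real
PrecomputedCR from lnt.server.reporting.analysis (all values made up):

    from collections import namedtuple

    # Hypothetical stand-in for PrecomputedCR; only the fields used by
    # the guard are modelled here.
    CR = namedtuple("CR", ["previous", "current", "bigger_is_better"])
    crs = [CR(10.0, 12.0, False), CR(None, 3.0, False)]  # second CR lacks a baseline

    olds = sum(x.previous for x in crs if x.previous)  # 10.0 -- None entries skipped
    news = sum(x.current for x in crs if x.current)    # 15.0
    if olds and news:
        impact = (olds, news, crs[0].bigger_is_better)  # aggregate comparison
    else:
        impact = (1, 1, True)                           # neutral fallback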
Modified: lnt/trunk/lnt/server/ui/static/lnt_graph.js
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/static/lnt_graph.js?rev=256297&r1=256296&r2=256297&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/static/lnt_graph.js (original)
+++ lnt/trunk/lnt/server/ui/static/lnt_graph.js Tue Dec 22 17:13:30 2015
@@ -40,6 +40,19 @@ function get_run_url(db, ts, runID) {
return [prefix, "db_" + db, "v4", ts, runID].join('/');
}
+// Create a new regression manually URL.
+function get_manual_regression_url(db, ts, url, runID) {
+ "use strict";
+ return [prefix,
+ "db_" + db,
+ "v4",
+ ts,
+ "regressions/new_from_graph",
+ url,
+ runID].join('/');
+}
+
+
/* Bind events to the zoom bar buttons, so that
* the zoom buttons work, then position them
@@ -107,6 +120,12 @@ function show_tooltip(x, y, item, pos, g
get_run_url(db_name, test_suite_name, meta_data.runID) +
"\">" + meta_data.runID + "<br>";
}
+
+ if (meta_data.runID && item.series.url) {
+ tip_body += "<a href=\"" +
+ get_manual_regression_url(db_name, test_suite_name, item.series.url, meta_data.runID) +
+ "\">Mark Change.<br>";
+ }
tip_body += "</div>";
var tooltip_div = $(tip_body).css({
@@ -299,7 +318,7 @@ function update_graph() {
color = color_codes[i % color_codes.length];
data = try_normal(data_cache[i], changes[i].start);
to_draw.push(make_graph_point_entry(data, color, false));
- to_draw.push({"color": color, "data": data});
+ to_draw.push({"color": color, "data": data, "url": changes[i].url});
}
}
// Regressions.
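
For illustration, assuming an empty URL prefix, db "default",
test-suite "nts", a series url of "1/5/2" (hypothetical machine id /
test id / field index, i.e. the part of the route that views.py now
attaches to each series) and runID 42, get_manual_regression_url()
assembles:

    /db_default/v4/nts/regressions/new_from_graph/1/5/2/42

which is exactly the endpoint added to regression_views.py above.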
Modified: lnt/trunk/lnt/server/ui/templates/v4_regression_detail.html
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/templates/v4_regression_detail.html?rev=256297&r1=256296&r2=256297&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/templates/v4_regression_detail.html (original)
+++ lnt/trunk/lnt/server/ui/templates/v4_regression_detail.html Tue Dec 22 17:13:30 2015
@@ -37,7 +37,7 @@
{% block javascript %}
var g = {};
var test_suite_name = "{{ testsuite_name }}";
-var db_name = "{{ request.view_args.db_name }}";
+var db_name = "{{ request.view_args.get('db_name','') }}";
var changes = [
{% for form_change in form.field_changes%}
Modified: lnt/trunk/lnt/server/ui/views.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/views.py?rev=256297&r1=256296&r2=256297&view=diff
==============================================================================
--- lnt/trunk/lnt/server/ui/views.py (original)
+++ lnt/trunk/lnt/server/ui/views.py Tue Dec 22 17:13:30 2015
@@ -638,7 +638,7 @@ def v4_graph():
# we want to load. Actually, we should just make this a single query.
#
# FIXME: Don't hard code field name.
- q = ts.query(field.column, ts.Order.llvm_project_revision, ts.Run.start_time).\
+ q = ts.query(field.column, ts.Order.llvm_project_revision, ts.Run.start_time, ts.Run.id).\
join(ts.Run).join(ts.Order).\
filter(ts.Run.machine_id == machine.id).\
filter(ts.Sample.test == test).\
@@ -651,10 +651,10 @@ def v4_graph():
(field.status_field.column == None))
# Aggregate by revision.
- data = util.multidict((rev, (val, date)) for val,rev,date in q).items()
+ data = util.multidict((rev, (val, date, run_id)) for val,rev,date,run_id in q).items()
data.sort(key=lambda sample: convert_revision(sample[0]))
- graph_datum.append((test.name, data, col, field))
+ graph_datum.append((test.name, data, col, field, url))
# Get baselines for this line
num_baselines = len(baseline_parameters)
@@ -709,9 +709,9 @@ def v4_graph():
# Sort data points according to revision number.
data.sort(key=lambda sample: convert_revision(sample[0]))
- graph_datum.append((test_name, data, col, field))
+ graph_datum.append((test_name, data, col, field, None))
- for name, data, col, field in graph_datum:
+ for name, data, col, field, url in graph_datum:
# Compute the graph points.
errorbar_data = []
points_data = []
@@ -730,6 +730,8 @@ def v4_graph():
data = [data_date[0] for data_date in datapoints]
# And the date on which they were taken.
dates = [data_date[1] for data_date in datapoints]
+ # Run where this point was collected.
+ runs = [data_pts[2] for data_pts in datapoints if len(data_pts)==3]
# When we can, map x-axis to revisions, but when that is too hard
# use the position of the sample instead.
@@ -745,8 +747,11 @@ def v4_graph():
for (index, value) in enumerate(values))
# Generate metadata.
- metadata = {"label":point_label}
+ metadata = {"label": point_label}
metadata["date"] = str(dates[agg_index])
+ if runs:
+ metadata["runID"] = str(runs[agg_index])
+
if len(graph_datum) > 1:
# If there are more than one plot in the graph, also label the
# test name.
@@ -808,10 +813,12 @@ def v4_graph():
# Add the minimum line plot, if requested.
if show_lineplot:
- graph_plots.append({
- "data" : pts,
- "color" : util.toColorString(col) })
-
+ plot = {"data" : pts,
+ "color" : util.toColorString(col)
+ }
+ if url:
+ plot["url"] = url
+ graph_plots.append(plot)
# Add regression line, if requested.
if show_linear_regression:
xs = [t for t,v,_ in pts]
@@ -848,15 +855,18 @@ def v4_graph():
# Add the points plot, if used.
if points_data:
pts_col = (0,0,0)
- graph_plots.append({
- "data" : points_data,
+ plot = {"data" : points_data,
"color" : util.toColorString(pts_col),
- "lines" : {
- "show" : False },
+ "lines" : {"show" : False },
"points" : {
"show" : True,
"radius" : .25,
- "fill" : True } })
+ "fill" : True
+ }
+ }
+ if url:
+ plot['url'] = url
+ graph_plots.append(plot)
# Add the error bar plot, if used.
if errorbar_data:
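
In short, every sample aggregated for the graph now remembers which
run it came from, and each plotted series optionally carries the url
used to build the manual-regression link. A minimal sketch of the
resulting shapes, with purely illustrative values:

    # Aggregated per-revision samples are now (value, date, run_id) tuples.
    data = {"256296": [(1.234, "2015-12-22 17:13:30", 42)]}
    # Point metadata picked up by lnt_graph.js for the tooltip:
    metadata = {"label": "256296", "date": "2015-12-22 17:13:30", "runID": "42"}
    # Line and points plots gain a "url" key when the series has one:
    url = "1/5/2"  # hypothetical machine/test/field portion of the route
    plot = {"data": [[0, 1.234]], "color": "#0000ff", "url": url}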
Modified: lnt/trunk/tests/server/ui/V4Pages.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/server/ui/V4Pages.py?rev=256297&r1=256296&r2=256297&view=diff
==============================================================================
--- lnt/trunk/tests/server/ui/V4Pages.py (original)
+++ lnt/trunk/tests/server/ui/V4Pages.py Tue Dec 22 17:13:30 2015
@@ -42,6 +42,7 @@ def check_json(client, url, expected_cod
def check_redirect(client, url, expected_redirect_regex):
+ """Check the client returns the expected redirect on this URL."""
resp = client.get(url, follow_redirects=False)
assert resp.status_code == 302, \
"Call to %s returned: %d, not the expected %d"%(url, resp.status_code, 302)
@@ -323,6 +324,13 @@ def main():
# Check some variations of the daily report work.
check_code(client, '/v4/compile/daily_report/2014/6/5?day_start=16')
check_code(client, '/v4/compile/daily_report/2014/6/4')
+
+ check_redirect(client, '/v4/nts/regressions/new_from_graph/1/1/1/1', '/v4/nts/regressions/1')
+ check_code(client, '/v4/nts/regressions/')
+
+ check_code(client, '/v4/nts/regressions/1')
+
+
if __name__ == '__main__':
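
A rough sketch of what the new check_redirect() call verifies,
assuming the Flask test client and sample database that V4Pages.py
already sets up (the 1/1/1/1 ids are simply the ones used in the test
above):

    import re

    resp = client.get('/v4/nts/regressions/new_from_graph/1/1/1/1',
                      follow_redirects=False)
    assert resp.status_code == 302
    assert re.search('/v4/nts/regressions/1', resp.headers['Location'])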