[LNT] r237658 - Refactor ComparisonResult to be more self contained

James Molloy james at jamesmolloy.co.uk
Wed Jun 3 07:16:31 PDT 2015


Hi Chris,

I just updated our LNT instance and this commit causes the following
internal server error:

... snip ...
  File "/home/llvm-test/sandbox/lnt/lnt/server/ui/templates/v4_run.html",
line 334, in block "body"
    {% set cr = request_info.sri.get_geomean_comparison_result(
  File "/home/llvm-test/sandbox/lnt/lnt/server/reporting/analysis.py", line
286, in get_geomean_comparison_result
    bigger_is_better=field.bigger_is_better)
  File "/home/llvm-test/sandbox/lnt/lnt/server/reporting/analysis.py", line
60, in __init__
    self.stddev = stats.standard_deviation(samples)
  File "/home/llvm-test/sandbox/lnt/lnt/util/stats.py", line 36, in
standard_deviation
    m = mean(l)
  File "/home/llvm-test/sandbox/lnt/lnt/util/stats.py", line 15, in mean
    return sum(l)/len(l)
TypeError: unsupported operand type(s) for +: 'int' and 'NoneType'
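
As far as I can tell from the diff below, the geometric mean path now hands
the per-test cr.current / cr.previous values straight to ComparisonResult as
samples, and a test with no data presumably contributes a None to that list,
which stats.standard_deviation / stats.mean then trip over. A minimal sketch
of what I think is happening (the sample values are made up; only the mean()
body is taken from the traceback, so treat this as a guess rather than a
verified diagnosis):

    def mean(l):
        # lnt/util/stats.py, as quoted in the traceback above
        return sum(l) / len(l)

    # Hypothetical geomean input: one test had no current value, so its
    # cr.current (None) is passed through as a sample.
    run_values = [None, 0.0887, 0.0903]
    mean(run_values)
    # raises TypeError: unsupported operand type(s) for +: 'int' and 'NoneType'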

Could you please take a look?
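
If it helps, one possible fix (just a sketch, untested against our instance)
would be to drop the missing values before building the geomean
ComparisonResult, the way the per-test path already filters its
run_values / prev_values:

    # Untested suggestion for get_geomean_comparison_result(): drop missing
    # per-test values before handing them to ComparisonResult.
    run_values = [v for v in run_values if v is not None]
    prev_values = [v for v in prev_values if v is not None]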

Cheers,

James

[P.S.: if you reply to james.molloy at arm.com I might actually get your reply -
I still lay the blame on gmail's forwarding eating your messages]

On Tue, 19 May 2015 at 03:11 Chris Matthews <cmatthews5 at apple.com> wrote:

> Author: cmatthews
> Date: Mon May 18 20:59:20 2015
> New Revision: 237658
>
> URL: http://llvm.org/viewvc/llvm-project?rev=237658&view=rev
> Log:
> Refactor ComparisonResult to be more self contained
>
> Modified:
>     lnt/trunk/lnt/server/reporting/analysis.py
>     lnt/trunk/lnt/server/ui/templates/v4_run.html
>     lnt/trunk/tests/server/reporting/analysis.py
>
> Modified: lnt/trunk/lnt/server/reporting/analysis.py
> URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/reporting/analysis.py?rev=237658&r1=237657&r2=237658&view=diff
>
> ==============================================================================
> --- lnt/trunk/lnt/server/reporting/analysis.py (original)
> +++ lnt/trunk/lnt/server/reporting/analysis.py Mon May 18 20:59:20 2015
> @@ -31,43 +31,67 @@ def calc_geomean(run_values):
>  class ComparisonResult:
>      """A ComparisonResult is ultimatly responsible for determining if a
> test
>      improves, regresses or does not change, given some new and old
> data."""
> -
> -    def __init__(self,cur_value, prev_value, delta, pct_delta, stddev, MAD,
> -                 cur_failed, prev_failed, samples, prev_samples, stddev_mean = None,
> -                 confidence_lv = .05, bigger_is_better = False):
> -        self.current = cur_value
> -        self.previous = prev_value
> -        self.delta = delta
> -        self.pct_delta = pct_delta
> -        self.stddev = stddev
> -        self.MAD = MAD
> +
> +    def __init__(self, aggregation_fn,
> +                 cur_failed, prev_failed, samples, prev_samples,
> +                 confidence_lv=0.05, bigger_is_better=False):
> +        self.aggregation_fn = aggregation_fn
> +        if samples:
> +            self.current = aggregation_fn(samples)
> +        else:
> +            self.current = None
> +        if prev_samples:
> +            self.previous = aggregation_fn(prev_samples)
> +        else:
> +            self.previous = None
> +
> +        # Compute the comparison status for the test value.
> +        if self.current and self.previous and self.previous != 0:
> +            self.delta = self.current - self.previous
> +            self.pct_delta = self.delta / self.previous
> +        else:
> +            self.delta = 0
> +            self.pct_delta = 0.0
> +
> +        # If we have multiple values for this run, use that to estimate the
> +        # distribution.
> +        if samples and len(samples) > 1:
> +            self.stddev = stats.standard_deviation(samples)
> +            self.MAD = stats.median_absolute_deviation(samples)
> +        else:
> +            self.stddev = None
> +            self.MAD = None
> +
> +        self.stddev_mean = None  # Only calculate this if needed.
>          self.failed = cur_failed
>          self.prev_failed = prev_failed
>          self.samples = samples
>          self.prev_samples = prev_samples
> -        self.stddev_mean = stddev_mean
> +
>          self.confidence_lv = confidence_lv
>          self.bigger_is_better = bigger_is_better
>
> +    @property
> +    def stddev_mean(self):
> +        """The mean around stddev for current sampples. Cached after
> first call.
> +        """
> +        if not self.stddev_mean:
> +            self.stddev_mean = stats.mean(self.samples)
> +        return self.stddev_mean
> +
>      def __repr__(self):
>          """Print this ComparisonResult's constructor.
> -
> +
>          Handy for generating test cases for comparisons doing odd things."""
> -        frmt = "{}(" + "{}, " * 11 + ")"
> -        return frmt.format("ComparisonResult",
> -                           self.current,
> -                           self.previous,
> -                           self.delta,
> -                           self.pct_delta,
> -                           self.stddev,
> -                           self.MAD,
> -                           self.failed,
> -                           self.prev_failed,
> -                           self.samples,
> -                           self.prev_samples,
> -                           self.stddev_mean,
> -                           self.confidence_lv,
> -                           self.bigger_is_better)
> +        fmt = "{}(" + "{}, " * 7 + ")"
> +        return fmt.format(self.__class__.__name__,
> +                          self.aggregation_fn.__name__,
> +                          self.failed,
> +                          self.prev_failed,
> +                          self.samples,
> +                          self.prev_samples,
> +                          self.confidence_lv,
> +                          bool(self.bigger_is_better))
>
>      def is_result_interesting(self):
>          """is_result_interesting() -> bool
> @@ -237,77 +261,27 @@ class RunInfo(object):
>                        if s[field.index] is not None]
>          prev_values = [s[field.index] for s in prev_samples
>                         if s[field.index] is not None]
> -        if run_values:
> -            run_value = self.aggregation_fn(run_values)
> -        else:
> -            run_value = None
> -        if prev_values:
> -            prev_value = self.aggregation_fn(prev_values)
> -        else:
> -            prev_value = None
> -
> -        # If we have multiple values for this run, use that to estimate the
> -        # distribution.
> -        if run_values and len(run_values) > 1:
> -            stddev = stats.standard_deviation(run_values)
> -            MAD = stats.median_absolute_deviation(run_values)
> -            stddev_mean = stats.mean(run_values)
> -        else:
> -            stddev = None
> -            MAD = None
> -            stddev_mean = None
> -
> -        # If we are missing current or comparison values we are done.
> -        if run_value is None or prev_value is None:
> -            return ComparisonResult(
> -                run_value, prev_value, delta=None,
> -                pct_delta = None, stddev = stddev, MAD = MAD,
> -                cur_failed = run_failed, prev_failed = prev_failed,
> -                samples = run_values, prev_samples = prev_values,
> -                confidence_lv = self.confidence_lv,
> -                bigger_is_better = field.bigger_is_better)
> -
> -        # Compute the comparison status for the test value.
> -        delta = run_value - prev_value
> -        if prev_value != 0:
> -            pct_delta = delta / prev_value
> -        else:
> -            pct_delta = 0.0
> -
> -        return ComparisonResult(run_value, prev_value, delta,
> -                                pct_delta, stddev, MAD,
> -                                run_failed, prev_failed, run_values,
> -                                prev_values, stddev_mean, self.confidence_lv,
> -                                bigger_is_better = field.bigger_is_better)
> -
> +
> +        r = ComparisonResult(self.aggregation_fn,
> +                             run_failed, prev_failed, run_values,
> +                             prev_values, self.confidence_lv,
> +                             bigger_is_better=field.bigger_is_better)
> +        print repr(r)
> +        return r
>
> -    def get_geomean_comparison_result(self, run, compare_to, field, tests,
> -                                      comparison_window=[]):
> +    def get_geomean_comparison_result(self, run, compare_to, field, tests):
>          if tests:
>              prev_values,run_values = zip(*[(cr.previous,cr.current) for _,_,cr in tests])
>          else:
>              prev_values,run_values = [], []
>
> -        run_geomean = calc_geomean(run_values)
> -        prev_geomean = calc_geomean(prev_values)
> -
> -        if run_geomean and prev_geomean:
> -            delta = run_geomean - prev_geomean
> -            if prev_geomean != 0:
> -                pct_delta = delta / prev_geomean
> -            else:
> -                pct_delta = 0.0
> -        else:
> -            delta = pct_delta = 0
> -
> -        return ComparisonResult(run_geomean, prev_geomean, delta,
> -                                pct_delta, stddev=None, MAD=None,
> -                                cur_failed=run_values and not run_geomean,
> -                                prev_failed=prev_values and not prev_geomean,
> -                                samples=[run_geomean] if run_geomean else [],
> -                                prev_samples=[prev_geomean] if prev_geomean else [],
> +        return ComparisonResult(calc_geomean,
> +                                cur_failed=bool(run_values),
> +                                prev_failed=bool(prev_values),
> +                                samples=run_values,
> +                                prev_samples=prev_values,
>                                  confidence_lv=0,
> -                                bigger_is_better = field.bigger_is_better)
> +                                bigger_is_better=field.bigger_is_better)
>
>      def _load_samples_for_runs(self, run_ids):
>          # Find the set of new runs to load.
>
> Modified: lnt/trunk/lnt/server/ui/templates/v4_run.html
> URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/server/ui/templates/v4_run.html?rev=237658&r1=237657&r2=237658&view=diff
>
> ==============================================================================
> --- lnt/trunk/lnt/server/ui/templates/v4_run.html (original)
> +++ lnt/trunk/lnt/server/ui/templates/v4_run.html Mon May 18 20:59:20 2015
> @@ -332,7 +332,7 @@
>          </tbody>
>          <tfoot>
>            {% set cr = request_info.sri.get_geomean_comparison_result(
> -                      run, compare_to, field, tests, request_info.comparison_window) %}
> +                      run, compare_to, field, tests) %}
>            <td><input type="checkbox" name="mean" value="{{machine.id }}.{{field.index}}"></td>
>            <td><a href="{{graph_base}}&mean={{machine.id}}.{{field.index}}">Geometric Mean</a></td>
>            {{ get_cell_value(cr) }}
>
> Modified: lnt/trunk/tests/server/reporting/analysis.py
> URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/server/reporting/analysis.py?rev=237658&r1=237657&r2=237658&view=diff
>
> ==============================================================================
> --- lnt/trunk/tests/server/reporting/analysis.py (original)
> +++ lnt/trunk/tests/server/reporting/analysis.py Mon May 18 20:59:20 2015
> @@ -4,7 +4,7 @@
>  import unittest
>  import lnt.util.stats as stats
>  from lnt.server.reporting.analysis import ComparisonResult, REGRESSED, IMPROVED
> -from lnt.server.reporting.analysis import UNCHANGED_PASS
> +from lnt.server.reporting.analysis import UNCHANGED_PASS, UNCHANGED_FAIL
>
>
>  class ComparisonResultTest(unittest.TestCase):
> @@ -13,15 +13,8 @@ class ComparisonResultTest(unittest.Test
>      def test_comp(self):
>          """Test a real example."""
>          curr_samples = [0.0887, 0.0919, 0.0903]
> -        prev = 0.0858
> -        cur = min(curr_samples)
> -        stddev = stats.standard_deviation(curr_samples)
> -        MAD = stats.median_absolute_deviation(curr_samples)
> -        stddev_mean = stats.mean(curr_samples)
> -        uninteresting = ComparisonResult(cur, prev, cur-prev,
> -                                         (cur-prev)/prev, stddev, MAD,
> -                                         False, False, curr_samples, [prev],
> -                                         stddev_mean)
> +        prev = [0.0858]
> +        uninteresting = ComparisonResult(min, False, False, curr_samples, prev)
>
>          self.assertFalse(uninteresting.is_result_interesting())
>          self.assertEquals(uninteresting.get_test_status(), UNCHANGED_PASS)
> @@ -29,31 +22,36 @@ class ComparisonResultTest(unittest.Test
>
>      def test_slower(self):
>          """Test getting a simple regression."""
> -        slower = ComparisonResult(10, 5, 5, 0.5, None, None,
> -                                  False, False, [10], [5], None)
> +        slower = ComparisonResult(min,
> +                                  False, False, [10], [5])
>          self.assertEquals(slower.get_value_status(), REGRESSED)
>          self.assertTrue(slower.is_result_interesting())
>
>      def test_faster(self):
>          """Test getting a simple improvement."""
>
> -        faster = ComparisonResult(5, 10, -5, -0.5, None, None,
> -                                  False, False, [5], [10], None)
> +        faster = ComparisonResult(min,
> +                                  False, False, [5], [10])
>          self.assertEquals(faster.get_value_status(), IMPROVED)
>          self.assertTrue(faster.is_result_interesting())
>
>      def test_improved_status(self):
>          """Test getting a test status improvement."""
> -        improved = ComparisonResult(None, None, None, None, None, None,
> -                                    False, True, [5], [10], None)
> +        improved = ComparisonResult(min,
> +                                    False, True, [1], None)
>          self.assertEquals(improved.get_test_status(), IMPROVED)
>
>      def test_regressed_status(self):
>          """Test getting a test status improvement."""
> -        improved = ComparisonResult(None, None, None, None, None, None,
> -                                    True, False, [5], [10], None)
> +        improved = ComparisonResult(min,
> +                                    True, False, None, [10])
>          self.assertEquals(improved.get_test_status(), REGRESSED)
>
> +    def test_keep_on_failing_status(self):
> +        """Test getting a repeated fail."""
> +        improved = ComparisonResult(min,
> +                                    True, True, None, None)
> +        self.assertEquals(improved.get_test_status(), UNCHANGED_FAIL)
>
>  if __name__ == '__main__':
>      unittest.main()
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>