[LNT] r218080 - Add rerun flag: when passed, rerun benchmarks which the server says changed
Chris Matthews
cmatthews5 at apple.com
Thu Sep 18 14:51:10 PDT 2014
Author: cmatthews
Date: Thu Sep 18 16:51:10 2014
New Revision: 218080
URL: http://llvm.org/viewvc/llvm-project?rev=218080&view=rev
Log:
Add rerun flag: when passed, rerun benchmarks which the server says changed
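For orientation before the diff: a benchmark is rerun only when the server's analysis of its execution time comes back as a regression or an improvement; unchanged results and failures are left alone, and the feature is exercised end-to-end by the new tests/runtest/rerun.py test below. The following standalone sketch is not part of the patch; the sample results and the string constants are stand-ins for the real values imported from lnt.server.reporting.analysis. It only shows the selection rule in isolation:

# Standalone sketch of the rerun selection rule; sample data invented,
# status constants stand in for the real lnt.server.reporting.analysis values.
REGRESSED, IMPROVED = 'REGRESSED', 'IMPROVED'
UNCHANGED_PASS = 'UNCHANGED_PASS'

# Server reply entries have the shape (test_name.metric, run_status, perf_status).
server_results = [
    ('vla.execution_time', u'PASS', REGRESSED),
    ('ms_struct-bitfield.execution_time', u'PASS', IMPROVED),
    ('printargs.execution_time', u'PASS', UNCHANGED_PASS),
]

to_rerun = [name.split('.')[0]
            for name, run_status, perf_status in server_results
            if perf_status in (REGRESSED, IMPROVED)]
print to_rerun  # ['vla', 'ms_struct-bitfield']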
Added:
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/Makefile
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/README.txt
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/TEST.simple.report
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/configure (with props)
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/fake-report.simple.csv
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/subtest/
lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/subtest/Makefile
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/GenerateReport.pl (with props)
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile.config
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct-bitfield.simple.report.txt
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct_pack_layout-1.simple.report.txt
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/vla.simple.report.txt
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/README.txt
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/TEST.simple.report
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/configure (with props)
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/fake-report.simple.csv
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/subtest/
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/subtest/Makefile
lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/vla.simple.report.txt
lnt/trunk/tests/runtest/Inputs/rerun_server_instance/
lnt/trunk/tests/runtest/Inputs/rerun_server_instance/data/
lnt/trunk/tests/runtest/Inputs/rerun_server_instance/data/lnt.db
lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.cfg
lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.wsgi
lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh (with props)
lnt/trunk/tests/runtest/rerun.py
Modified:
lnt/trunk/lnt/tests/nt.py
lnt/trunk/tests/lit.cfg
Modified: lnt/trunk/lnt/tests/nt.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/nt.py?rev=218080&r1=218079&r2=218080&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/nt.py (original)
+++ lnt/trunk/lnt/tests/nt.py Thu Sep 18 16:51:10 2014
@@ -5,20 +5,30 @@ import re
import shutil
import subprocess
import sys
+import glob
import time
import traceback
-
from datetime import datetime
+from optparse import OptionParser, OptionGroup
+
import lnt.testing
import lnt.testing.util.compilers
import lnt.util.ImportData as ImportData
-from lnt.testing.util.commands import note, warning, error, fatal, resolve_command_path
+from lnt.testing.util.commands import note, warning, fatal
from lnt.testing.util.commands import capture, mkdir_p, which
+from lnt.testing.util.commands import resolve_command_path
+
from lnt.testing.util.rcs import get_source_version
+
from lnt.testing.util.misc import timestamp
+from lnt.server.reporting.analysis import UNCHANGED_PASS, UNCHANGED_FAIL
+from lnt.server.reporting.analysis import REGRESSED, IMPROVED
+from lnt.util import ImportData
+import builtintest
+
###
class TestModule(object):
@@ -170,6 +180,11 @@ class TestConfiguration(object):
assert self.qemu_user_mode
return ' '.join([self.qemu_user_mode] + self.qemu_flags)
+ @property
+ def generate_report_script(self):
+ """ The path to the report generation script. """
+ return os.path.join(self.test_suite_root, "GenerateReport.pl")
+
def build_report_path(self, iteration):
"""The path of the results.csv file which each run of the test suite
will produce.
@@ -642,6 +657,9 @@ def execute_nt_tests(test_log, make_vari
if res != 0:
print >> sys.stderr, "Failure while running nightly tests! See log: %s" % (test_log.name)
+# Keep a mapping from mangled test names to the original names in the test-suite.
+TEST_TO_NAME = {}
+
def load_nt_report_file(report_path, config):
# Compute the test samples to report.
sample_keys = []
@@ -694,18 +712,26 @@ def load_nt_report_file(report_path, con
record = dict(zip(header, row))
program = record['Program']
+
if config.only_test is not None:
program = os.path.join(config.only_test, program)
- test_base_name = '%s.%s' % (test_namespace, program.replace('.','_'))
+ program_real = program
+ program_mangled = program.replace('.','_')
+ test_base_name = program_mangled
# Check if this is a subtest result, in which case we ignore missing
# values.
if '_Subtest_' in test_base_name:
is_subtest = True
test_base_name = test_base_name.replace('_Subtest_', '.')
+
else:
is_subtest = False
+ test_base_name = '%s.%s' % (test_namespace, test_base_name)
+
+ TEST_TO_NAME[test_base_name] = program_real
+
for info in sample_keys:
if len(info) == 3:
name,key,tname = info
@@ -1045,8 +1071,283 @@ def run_test(nick_prefix, iteration, con
###
-import builtintest
-from optparse import OptionParser, OptionGroup
+def _construct_report_path(basedir, only_test, test_style, file_type="csv"):
+ """Get the full path to report files in the sandbox.
+ """
+ report_path = os.path.join(basedir)
+ if only_test is not None:
+ report_path = os.path.join(report_path, only_test)
+ report_path = os.path.join(report_path, ('report.%s.' % test_style) + file_type)
+ return report_path
+
+
+def rerun_test(config, name, num_times):
+    """Rerun the test called `name` num_times, using the settings stored
+    in config from the original run.
+
+ """
+ # Extend the old log file.
+ logfile = open(config.test_log_path(None), 'a')
+
+ # Grab the real test name instead of the LNT benchmark URL.
+ real_name = TEST_TO_NAME["nts." + name]
+
+ relative_test_path = os.path.dirname(real_name)
+ test_name = os.path.basename(real_name)
+
+ test_full_path = os.path.join(
+ config.report_dir, relative_test_path)
+
+ assert os.path.exists(test_full_path), "Previous test directory not there?" + \
+ test_full_path
+
+ results = []
+ for _ in xrange(0, num_times):
+ test_results = _execute_test_again(config,
+ test_name,
+ test_full_path,
+ relative_test_path,
+ logfile)
+ results.extend(test_results)
+
+ # Check we got an exec and status from each run.
+ assert len(results) >= num_times, "Did not get all the runs?" + str(results)
+
+ logfile.close()
+ return results
+
+
+def _prepare_testsuite_for_rerun(test_name, test_full_path, config):
+ """Rerun step 1: wipe out old files to get ready for rerun.
+
+ """
+ output = os.path.join(test_full_path, "Output/")
+ test_path_prefix = output + test_name + "."
+ os.remove(test_path_prefix + "out-" + config.test_style)
+
+ # Remove all the test-suite accounting files for this benchmark
+ to_go = glob.glob(test_path_prefix + "*.time")
+ to_go.extend(glob.glob(test_path_prefix + "*.txt"))
+ to_go.extend(glob.glob(test_path_prefix + "*.csv"))
+
+ assert len(to_go) >= 1, "Missing at least one accounting file."
+ for path in to_go:
+ print "Removing:", path
+ os.remove(path)
+
+
+def _execute_test_again(config, test_name, test_path, test_relative_path, logfile):
+ """(Re)Execute the benchmark of interest. """
+
+ _prepare_testsuite_for_rerun(test_name, test_path, config)
+
+ # Grab old make invocation.
+ mk_vars, _ = config.compute_run_make_variables()
+ to_exec = ['make', '-k']
+ to_exec.extend('%s=%s' % (k, v) for k, v in mk_vars.items())
+
+ # We need to run the benchmark's makefile, not the global one.
+ if config.only_test is not None:
+ to_exec.extend(['-C', config.only_test])
+ else:
+ if test_relative_path:
+ to_exec.extend(['-C', test_relative_path])
+
+ # The target for the specific benchmark.
+ # Make target.
+ benchmark_report_target = "Output/" + test_name + \
+ "." + config.test_style + ".report.txt"
+ # Actual file system location of the target.
+ benchmark_report_path = os.path.join(config.build_dir(None),
+ test_path,
+ benchmark_report_target)
+ to_exec.append(benchmark_report_target)
+
+ returncode = execute_command(logfile,
+ config.build_dir(None), to_exec, config.report_dir)
+ assert returncode == 0, "Remake command failed."
+ assert os.path.exists(benchmark_report_path), "Missing " \
+ "generated report: " + benchmark_report_path
+
+ # Now we need to pull out the results into the CSV format LNT can read.
+ schema = os.path.join(config.test_suite_root,
+ "TEST." + config.test_style + ".report")
+ result_path = os.path.join(config.build_dir(None),
+ test_path, "Output",
+ test_name + "." + config.test_style + ".report.csv")
+
+ gen_report_template = "{gen} -csv {schema} < {input} > {output}"
+ gen_cmd = gen_report_template.format(gen=config.generate_report_script,
+ schema=schema, input=benchmark_report_path, output=result_path)
+ bash_gen_cmd = ["/bin/bash", "-c", gen_cmd]
+
+ assert not os.path.exists(result_path), "Results should not exist yet." + \
+ result_path
+ returncode = execute_command(logfile,
+ config.build_dir(None), bash_gen_cmd, config.report_dir)
+ assert returncode == 0, "command failed"
+ assert os.path.exists(result_path), "Missing results file."
+
+ results = load_nt_report_file(result_path, config)
+ assert len(results) > 0
+ return results
+
+
+# Number of times to rerun each benchmark which the server reports as changed.
+# TODO: remove me when rerun patch is done.
+NUMBER_OF_RERUNS = 4
+
+SERVER_FAIL = u'FAIL'
+SERVER_PASS = u'PASS'
+
+# Local test result names have these suffixes:
+# a test has the perf suffix if it passed, or a status suffix if it failed.
+LOCAL_COMPILE_PERF = "compile"
+LOCAL_COMPILE_STATUS = "compile.status"
+LOCAL_EXEC_PERF = "exec"
+LOCAL_EXEC_STATUS = "exec.status"
+
+# Server results have both status and performance in each entry
+SERVER_COMPILE_RESULT = "compile_time"
+SERVER_EXEC_RESULT = "execution_time"
+
+
+class PastRunData(object):
+    """To decide whether a test needs a rerun, we must know what happened
+    to it in the initial runs.  Because the server returns data in a
+    different format than the local results, this class aggregates the two
+    reports into one per-test record."""
+ def __init__(self, name):
+ self.name = name
+ self.compile_status = None
+ self.compile_time = None
+ self.execution_status = None
+ self.execution_time = None
+ self.valid = False
+
+ def check(self):
+ """Make sure this run data is complete."""
+ assert self.name is not None
+ msg = "Malformed test: %s" % (repr(self))
+ assert self.compile_status is not None, msg
+ assert self.execution_status is not None, msg
+ assert self.compile_time is not None, msg
+ assert self.execution_time is not None, msg
+ self.valid = True
+
+ def is_rerunable(self):
+ """Decide if we should rerun this test."""
+ assert self.valid
+ # Don't rerun if compile failed.
+ if self.compile_status == SERVER_FAIL:
+ return False
+
+ # Don't rerun on correctness failure or test pass.
+ if self.execution_status == UNCHANGED_FAIL or \
+ self.execution_status == UNCHANGED_PASS or \
+ self.execution_status == SERVER_FAIL:
+ return False
+
+ # Do rerun on regression or improvement.
+ if self.execution_status == REGRESSED or \
+ self.execution_status == IMPROVED:
+ return True
+
+ assert False, "Malformed run data: " \
+ "you should not get here. " + str(self)
+
+ def __repr__(self):
+ template = "<{}: CS {}, CT {}, ES {}, ET {}>"
+ return template.format(self.name,
+ repr(self.compile_status),
+ repr(self.compile_time),
+ repr(self.execution_status),
+ repr(self.execution_time))
+
+
+def _process_reruns(config, server_reply, local_results):
+ """Rerun each benchmark which the server reported "changed", N more
+ times.
+ """
+ server_results = server_reply['test_results'][0]['results']
+
+ # Holds the combined local and server results.
+ collated_results = dict()
+
+ for b in local_results.tests:
+ # format: suite.test/path/and/name.type<.type>
+ fields = b.name.split('.')
+ test_name = fields[1]
+        test_type = '.'.join(fields[2:])
+
+ updating_entry = collated_results.get(test_name,
+ PastRunData(test_name))
+ if test_type == LOCAL_COMPILE_PERF:
+ updating_entry.compile_time = b.data
+ elif test_type == LOCAL_COMPILE_STATUS:
+ updating_entry.compile_status = SERVER_FAIL
+ elif test_type == LOCAL_EXEC_PERF:
+ updating_entry.execution_time = b.data
+ elif test_type == LOCAL_EXEC_STATUS:
+ updating_entry.execution_status = SERVER_FAIL
+ else:
+ assert False, "Unexpected local test type."
+
+ collated_results[test_name] = updating_entry
+
+    # Now layer the server results on top of any entry we already have.
+ for full_name, results_status, perf_status in server_results:
+ test_name, test_type = full_name.split(".")
+
+ new_entry = collated_results.get(test_name, None)
+ # Some tests will come from the server, which we did not run locally.
+ # Drop them.
+ if new_entry is None:
+ continue
+        # Only set these if they were not already marked as failures above.
+ if SERVER_COMPILE_RESULT in test_type:
+ if new_entry.compile_status is None:
+ new_entry.compile_status = results_status
+ elif SERVER_EXEC_RESULT in test_type:
+ if new_entry.execution_status is None:
+ # If the server has not seen the test before, it will return
+ # None for the performance results analysis. In this case we
+ # will assume no rerun is needed, so assign unchanged.
+ if perf_status is None:
+ derived_perf_status = UNCHANGED_PASS
+ else:
+ derived_perf_status = perf_status
+ new_entry.execution_status = derived_perf_status
+ else:
+ assert False, "Unexpected server result type."
+ collated_results[test_name] = new_entry
+
+ # Double check that all values are there for all tests.
+ for test in collated_results.values():
+ test.check()
+
+ rerunable_benches = [x for x in collated_results.values() \
+ if x.is_rerunable()]
+ rerunable_benches.sort(key=lambda x: x.name)
+    # Now let's do the reruns.
+ rerun_results = []
+ summary = "Rerunning {} of {} benchmarks."
+ note(summary.format(len(rerunable_benches),
+ len(collated_results.values())))
+
+ for i, bench in enumerate(rerunable_benches):
+ note("Rerunning: {} [{}/{}]".format(bench.name,
+ i + 1,
+ len(rerunable_benches)))
+ fresh_samples = rerun_test(config,
+ bench.name,
+ NUMBER_OF_RERUNS)
+ rerun_results.extend(fresh_samples)
+
+ return rerun_results
+
usage_info = """
Script for running the tests in LLVM's test-suite repository.
@@ -1249,7 +1550,9 @@ class NTTest(builtintest.BuiltinTest):
help=("Use perf to obtain high accuracy timing"
"[%default]"),
type=str, default=None)
-
+ group.add_option("", "--rerun", dest="rerun",
+ help="Rerun tests that have regressed.",
+ action="store_true", default=False)
group.add_option("", "--remote", dest="remote",
help=("Execute remotely, see "
"--remote-{host,port,user,client} [%default]"),
@@ -1514,6 +1817,18 @@ class NTTest(builtintest.BuiltinTest):
else:
test_results = run_test(nick, None, config)
+ if opts.rerun:
+ self.log("Performing any needed reruns.")
+ server_report = self.submit_helper(config, commit=False)
+ new_samples = _process_reruns(config, server_report, test_results)
+ test_results.update_report(new_samples)
+
+ # persist report with new samples.
+ lnt_report_path = config.report_path(None)
+
+ lnt_report_file = open(lnt_report_path, 'w')
+ print >>lnt_report_file, test_results.render()
+ lnt_report_file.close()
if config.output is not None:
self.print_report(test_results, config.output)
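The subtle part of _process_reruns above is name handling: local sample names have the shape suite.benchmark-path.kind, where dots inside the benchmark name were already mangled to underscores, while the server reply uses (test_name.metric, status, perf_status) tuples. A simplified, standalone illustration of the local-side grouping follows; the names are invented and plain lists stand in for the PastRunData records the real code accumulates:

# Sketch of the local-result grouping done in _process_reruns; sample names
# are invented, and plain lists stand in for PastRunData records.
local_names = [
    'nts.SingleSource/Benchmarks/Shootout-C++/vla.compile',
    'nts.SingleSource/Benchmarks/Shootout-C++/vla.exec',
    'nts.SingleSource/Benchmarks/Shootout/ms_struct-bitfield.exec.status',
]

collated = {}
for name in local_names:
    fields = name.split('.')          # ['nts', '<benchmark path>', 'exec', ...]
    test_name = fields[1]             # benchmark path, dots already mangled to '_'
    test_type = '.'.join(fields[2:])  # 'compile', 'exec', 'exec.status', ...
    collated.setdefault(test_name, []).append(test_type)

for test, kinds in sorted(collated.items()):
    print test, kinds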
Modified: lnt/trunk/tests/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lit.cfg?rev=218080&r1=218079&r2=218080&view=diff
==============================================================================
--- lnt/trunk/tests/lit.cfg (original)
+++ lnt/trunk/tests/lit.cfg Thu Sep 18 16:51:10 2014
@@ -48,3 +48,4 @@ config.available_features.add(platform.s
if lit_config.params.get('check-coverage', None):
config.environment['COVERAGE_PROCESS_START'] = os.path.join(
os.path.dirname(__file__), ".coveragerc")
+
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/Makefile
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/Makefile?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/Makefile (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/Makefile Thu Sep 18 16:51:10 2014
@@ -0,0 +1,13 @@
+# Fake makefile
+
+include Makefile.config
+
+tools:
+ echo "This is a fake tools build."
+
+report:
+ echo "This is a fake report build."
+.PHONY: report
+
+report.simple.csv: report
+ cp ${PROJ_SRC_ROOT}/fake-report.simple.csv $@
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/README.txt
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/README.txt?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/README.txt (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/README.txt Thu Sep 18 16:51:10 2014
@@ -0,0 +1,2 @@
+This is a dummy set of LLVM test-suite sources, just intended for use with
+testing the 'lnt runtest nt' module.
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/TEST.simple.report
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/TEST.simple.report?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/TEST.simple.report (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/TEST.simple.report Thu Sep 18 16:51:10 2014
@@ -0,0 +1,32 @@
+##=== TEST.nightly.report - Report description for nightly -----*- perl -*-===##
+#
+# This file defines a report to be generated for the nightly tests.
+#
+##===----------------------------------------------------------------------===##
+
+# Sort by program name
+$SortCol = 0;
+$TrimRepeatedPrefix = 1;
+
+my $WallTimeRE = "Time: ([0-9.]+) seconds \\([0-9.]+ wall clock";
+
+# FormatTime - Convert a time from 1m23.45 into 83.45
+sub FormatTime {
+ my $Time = shift;
+ if ($Time =~ m/([0-9]+)[m:]([0-9.]+)/) {
+ return sprintf("%7.4f", $1*60.0+$2);
+ }
+
+ return sprintf("%7.4f", $Time);
+}
+
+(
+ ["Program" , '\'([^\']+)\' Program'],
+ [],
+ ["CC" , 'TEST-RESULT-compile-success: (pass|fail|xfail)'],
+ ["CC_Time" , 'TEST-RESULT-compile-time: user\s*([.0-9m:]+)', \&FormatTime],
+ ["CC_Real_Time", 'TEST-RESULT-compile-real-time: real\s*([.0-9m:]+)', \&FormatTime],
+ ["Exec" , 'TEST-RESULT-exec-success: (pass|fail|xfail)'],
+ ["Exec_Time", 'TEST-RESULT-exec-time: user\s*([.0-9m:]+)', \&FormatTime],
+ ["Exec_Real_Time", 'TEST-RESULT-exec-real-time: real\s*([.0-9m:]+)', \&FormatTime],
+);
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/configure
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/configure?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/configure (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/configure Thu Sep 18 16:51:10 2014
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+SRC_PATH=$(dirname $0)
+
+echo "This is a fake configure script."
+
+echo "Copying in Makefile..."
+cp $SRC_PATH/Makefile .
+cp $SRC_PATH/subtest/Makefile ./subtest/
+
+echo "Creating Makefile.config..."
+echo "PROJ_SRC_ROOT = \"${SRC_PATH}\"" > Makefile.config
Propchange: lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/configure
------------------------------------------------------------------------------
svn:executable = *
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/fake-report.simple.csv
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/fake-report.simple.csv?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/fake-report.simple.csv (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/fake-report.simple.csv Thu Sep 18 16:51:10 2014
@@ -0,0 +1,70 @@
+Program,CC,CC_Time,Exec,Exec_Time
+AtomicOps,pass, 0.0076,pass, 0.0003
+DefaultInitDynArrays,pass, 0.0159,pass, 0.0003
+FloatPrecision,pass, 0.0078,pass, 0.0003
+ObjC++/Hello,pass, 0.3348,pass, 0.0032
+ObjC++/property-reference,pass, 0.4295,pass, 0.0060
+ObjC++/property-reference-object,pass, 0.3569,pass, 0.0031
+ObjC++/reference-in-block-args,pass, 0.0127,pass, 0.0030
+ObjC/bitfield-access,pass, 0.0171,pass, 0.0030
+ObjC/bitfield-access-2,pass, 0.0171,pass, 0.0030
+ObjC/block-byref-aggr,pass, 0.3288,pass, 0.0030
+ObjC/constant-strings,pass, 0.0072,pass, 0.0031
+ObjC/dot-syntax,pass, 0.0223,pass, 0.0030
+ObjC/dot-syntax-1,pass, 0.0264,pass, 0.0031
+ObjC/dot-syntax-2,pass, 0.0136,pass, 0.0032
+ObjC/exceptions,pass, 0.2270,pass, 0.0032
+ObjC/exceptions-2,pass, 0.2062,pass, 0.0030
+ObjC/exceptions-3,pass, 0.2097,pass, 0.0031
+ObjC/exceptions-4,pass, 0.2103,pass, 0.0048
+ObjC/for-in,pass, 0.2147,pass, 0.0034
+ObjC/instance-method-metadata,pass, 0.2126,pass, 0.0030
+ObjC/messages,pass, 0.0193,pass, 0.0030
+ObjC/messages-2,pass, 0.0356,pass, 0.0030
+ObjC/parameter-passing,pass, 0.2268,pass, 0.0031
+ObjC/predefined-expr-in-method,pass, 0.0115,pass, 0.0031
+ObjC/property,pass, 0.2239,pass, 0.0031
+ObjC/protocols,pass, 0.0193,pass, 0.0030
+ObjC/synchronized,pass, 0.2094,pass, 0.1217
+ObjC/trivial-interface,pass, 0.2071,pass, 0.0031
+SignlessTypes/Large/cast,pass, 0.0314,pass, 0.0087
+SignlessTypes/cast-bug,pass, 0.0061,pass, 0.0003
+SignlessTypes/cast2,pass, 0.0085,pass, 0.0003
+SignlessTypes/ccc,pass, 0.0160,pass, 0.0003
+SignlessTypes/div,pass, 0.0139,pass, 0.0003
+SignlessTypes/factor,pass, 0.0169,pass, 0.0003
+SignlessTypes/rem,pass, 0.0599,pass, 0.0009
+SignlessTypes/shr,pass, 0.0139,pass, 0.0003
+StructModifyTest,pass, 0.0062,pass, 0.0003
+TestLoop,pass, 0.0088,pass, 0.0003
+Vector/SSE/sse.expandfft,pass, 0.0652,pass, 0.2459
+Vector/SSE/sse.isamax,pass, 0.0388,pass, 0.0003
+Vector/SSE/sse.shift,pass, 0.0217,pass, 0.0003
+Vector/SSE/sse.stepfft,pass, 0.0524,pass, 0.3313
+Vector/build,pass, 0.0121,pass, 0.0003
+Vector/build2,pass, 0.0159,pass, 1.1560
+Vector/divides,pass, 0.0090,pass, 0.0003
+Vector/multiplies,pass, 0.0169,pass, 1.8812
+Vector/simple,pass, 0.0134,pass, 0.0003
+Vector/sumarray,pass, 0.0099,pass, 0.0003
+Vector/sumarray-dbl,pass, 0.0107,pass, 0.0003
+block-byref-cxxobj-test,pass, 0.0148,pass, 0.0003
+block-byref-test,pass, 0.0080,pass, 0.0003
+block-call-r7674133,pass, 0.0072,pass, 0.0003
+block-copied-in-cxxobj,pass, 0.0186,pass, 0.0003
+block-copied-in-cxxobj-1,pass, 0.0165,pass, 0.0003
+blockstret,pass, 0.0089,pass, 0.0003
+byval-alignment,pass, 0.0079,pass, 0.0003
+conditional-gnu-ext,pass, 0.0066,pass, 0.0003
+conditional-gnu-ext-cxx,pass, 0.0082,pass, 0.0003
+initp1,pass, 0.0240,pass, 0.0003
+member-function-pointers,pass, 0.0120,pass, 0.0003
+ms_struct-bitfield,pass, 0.0053,pass, 0.0003
+ms_struct-bitfield-1,pass, 0.0049,pass, 0.0003
+ms_struct-bitfield-init,pass, 0.0100,pass, 0.0003
+ms_struct-bitfield-init-1,pass, 0.0119,pass, 0.0003
+ms_struct_pack_layout,pass, 0.0111,pass, 0.0003
+ms_struct_pack_layout-1,pass, 0.0046,pass, 0.0003
+printargs,pass, 0.0085,pass,0.01
+stmtexpr,pass, 0.0090, *,0.01
+vla,pass, 0.0194,pass, 0.0003
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/subtest/Makefile
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/subtest/Makefile?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/subtest/Makefile (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite1/subtest/Makefile Thu Sep 18 16:51:10 2014
@@ -0,0 +1,13 @@
+# Fake makefile
+
+include ../Makefile.config
+
+tools:
+ echo "This is a fake tools build."
+
+report:
+ echo "This is a fake report build."
+.PHONY: report
+
+report.simple.csv: report
+ cp ${PROJ_SRC_ROOT}/fake-report.simple.csv $@
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/GenerateReport.pl
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/GenerateReport.pl?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/GenerateReport.pl (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/GenerateReport.pl Thu Sep 18 16:51:10 2014
@@ -0,0 +1,404 @@
+#!/usr/bin/perl -w
+#
+# Program: GenerateReport.pl
+#
+# Synopsis: Summarize a big log file into a table of values, commonly used for
+# testing. This can generate either a plaintext table, HTML table,
+# or Latex table, depending on whether the -html or -latex options are
+# specified.
+#
+# This script reads a report description file to specify the fields
+#            and descriptions for the columns of interest. It reads the raw log
+# input from stdin and writes the table to stdout.
+#
+# Syntax: GenerateReport.pl [-html] [-latex] [-graphs] [-csv] <ReportDesc>
+# < Input > Output
+#
+
+# Default values for arguments
+my $HTML = 0;
+my $LATEX = 0;
+my $GRAPHS = 0;
+my $CSV = 0;
+
+# Parse arguments...
+while ($_ = $ARGV[0], /^[-+]/) {
+ shift;
+ last if /^--$/; # Stop processing arguments on --
+
+ # List command line options here...
+ if (/^-html$/) { $HTML = 1; next; }
+ if (/^-latex$/) { $LATEX = 1; next; }
+ if (/^-graphs$/) { $GRAPHS = 1; next; }
+ if (/^-csv$/) { $CSV = 1; next; }
+
+ print "Unknown option: $_ : ignoring!\n";
+}
+
+#
+# Parameters which may be overridden by the report description file.
+#
+
+# The column to sort by, to be overridden as necessary by the report description
+my $SortCol = 0;
+my $SortReverse = 0;
+my $SortNumeric = 0; # Sort numerically or textually?
+
+# If the report wants us to trim repeated path prefixes off of the start of the
+# strings in the first column of the report, we can do that.
+my $TrimRepeatedPrefix = 0;
+my $TrimAllDirectories = 0;
+
+# Helper functions which may be called by the report description files...
+sub SumCols {
+ my ($Cols, $Col, $NumRows) = @_;
+ $Val = 0;
+ while ($NumRows) {
+ $Col--; $NumRows--;
+ $Val += $Cols->[$Col] if ($Cols->[$Col] ne "*");
+ }
+ return $Val;
+}
+
+sub AddColumns {
+ my ($Cols, $Col, @Indices) = @_;
+ my $result = 0;
+
+ foreach $Idx (@Indices) {
+ if ($Cols->[$Col+$Idx] ne "*") {
+ $result += $Cols->[$Col+$Idx];
+ }
+ }
+
+ return $result;
+}
+
+# Check command line arguments...
+die "Must specify a report description option" if (scalar(@ARGV) < 1);
+
+# Read file input in one big gulp...
+undef $/;
+
+# Read raw data file and split it up into records. Each benchmark starts with
+# a line with a >>> prefix
+#
+my @Records = split />>> ========= /, <STDIN>;
+
+# Delete the first "entry" which is really stuff printed prior to starting the
+# first test.
+shift @Records;
+
+# Read and eval the report description file now. This defines the Fields array
+# and may potentially modify some of our global settings like the sort key.
+#
+my $ReportFN = $ARGV[0];
+#print "Reading report description from $ReportFN\n";
+open(REPORTDESC, $ReportFN) or
+ die "Couldn't open report description '$ReportFN'!";
+
+# HilightColumns - Filled in by the report if desired in HTML mode. This
+# contains a column number if the HTML version of the output should highlight a
+# cell in green/red if it is gt/lt 1.0 by a significant margin.
+my %HilightColumns;
+
+my @LatexColumns; # Filled in by report if it supports Latex mode
+my %LatexColumnFormat; # Filled in by report if supports latex mode
+my @Graphs; # Filled in by the report if supports graph mode
+
+# Fill in all of the fields from the report description
+my @Fields = eval <REPORTDESC>;
+
+
+#
+# Read data into the table of values...
+#
+my @Values;
+foreach $Record (@Records) {
+ my @RowValues;
+ my $Col = 0;
+ for $Row (@Fields) {
+ my $Val = "*";
+ if (scalar(@$Row)) { # An actual value to read?
+ if (ref ($Row->[1])) { # Code to be executed?
+ $Val = &{$Row->[1]}(\@RowValues, $Col);
+ } else { # Field to be read...
+ $Record =~ m/$Row->[1]/;
+ if (!defined($1)) {
+ $Val = "*";
+ } else {
+ # If there is a formatting function, run it now...
+ $Val = $1;
+ if (scalar(@$Row) > 2) {
+ $Val = &{$Row->[2]}($Val);
+ }
+ }
+ }
+    } else {                     # Just add a separator...
+ $Val = "|";
+ }
+
+ push @RowValues, $Val;
+ $Col++;
+ }
+
+ my $Assert = "";
+ if ($Record =~ m/Assertion/) {
+ # If an assertion failure occured, print it out.
+    # If an assertion failure occurred, print it out.
+ }
+ push @RowValues, $Assert if (!$HTML);
+ push @Values, [@RowValues];
+}
+
+
+# If the report wants it, we can trim excess cruft off of the beginning of the
+# first column (which is often a path).
+if ($TrimRepeatedPrefix and scalar(@Values)) {
+ OuterLoop: while (1) {
+ # Figure out what the first path prefix is:
+ $Values[0]->[0] =~ m|^([^/]*/).|;
+ last OuterLoop if (!defined($1));
+
+ # Now that we have the prefix, check to see if all of the entries in the
+ # table start with this prefix.
+ foreach $Row (@Values) {
+ last OuterLoop if ((substr $Row->[0], 0, length $1) ne $1);
+ }
+
+ # If we get here, then all of the entries have the prefix. Remove it now.
+ foreach $Row (@Values) {
+ $Row->[0] = substr $Row->[0], length $1;
+ }
+ }
+}
+
+# If the report wants it, we can trim off the directory part of the
+# first column.
+if ($TrimAllDirectories and scalar(@Values)) {
+ foreach $Row (@Values) {
+ $Row->[0] =~ s|^.*/||g;
+ }
+}
+
+
+#
+# Sort table now...
+#
+if ($SortNumeric) {
+ @Values = sort { $lhs = $a->[$SortCol]; $rhs = $b->[$SortCol];
+ $lhs = 0 if ($lhs eq "*");
+ $rhs = 0 if ($rhs eq "*");
+ $lhs <=> $rhs } @Values;
+} else {
+ @Values = sort { $a->[$SortCol] cmp $b->[$SortCol] } @Values;
+}
+@Values = reverse @Values if ($SortReverse);
+
+#
+# Condense the header into an easier to access array...
+#
+my @Header;
+for $Row (@Fields) {
+ if (scalar(@$Row)) { # Non-empty row?
+ push @Header, $Row->[0];
+  } else {                  # Empty row, just add separator
+ push @Header, "|";
+ }
+}
+
+if ($HTML) {
+ sub printCell {
+ my $Str = shift;
+ my $ColNo = shift;
+ my $IsWhite = shift;
+ my $Attrs = "";
+ if ($Str eq '|') {
+ $Attrs = " bgcolor='black' width='1'";
+ $Str = "";
+ } else {
+ # If the user requested that we highlight this column, check to see what
+ # number it is. If it is > 1.05, we color it green, < 0.95 we use red.
+ # If it's not a number, ignore it.
+ if ($HilightColumns{$ColNo}) {
+ if ($Str =~ m/^([0-9]+).?[0-9.]*$/) {
+ if ($Str <= 0.85) {
+ $Attrs = " bgcolor='#FF7070'";
+ } elsif ($Str <= 0.95) {
+ $Attrs = " bgcolor='#FFAAAA'";
+ } elsif ($Str >= 1.15) {
+ $Attrs = " bgcolor='#80FF80'";
+ } elsif ($Str >= 1.05) {
+ $Attrs = " bgcolor='#CCFFCC'";
+ }
+ }
+
+ if (!$IsWhite && $Attrs eq "") {
+ # If it's not already white, make it white now.
+ $Attrs = " bgcolor=white";
+ }
+ }
+ };
+ print "<td$Attrs>$Str</td>";
+ "";
+ }
+
+ print "<table border='0' cellspacing='0' cellpadding='0'>\n";
+ print "<tr bgcolor=#FFCC99>\n";
+ map {
+ $_ = "<center><b><a href=\"#$_\">$_</a></b></center>"
+ if $_ ne "|";
+ printCell($_, -1)
+ } @Header;
+ print "\n</tr><tr bgcolor='black' height=1>";
+ print "</tr>\n";
+ my $RowCount = 0;
+ foreach $Row (@Values) {
+ my $IsWhite;
+ $IsWhite = ++$RowCount <= 2;
+ print "<tr bgcolor='" . ($IsWhite ? "white" : "#CCCCCC") . "'>\n";
+ $RowCount = 0 if ($RowCount > 3);
+ my $ColCount = 0;
+ map { printCell($_, $ColCount++, $IsWhite); } @$Row;
+ print "\n</tr>\n";
+ }
+ print "\n</table>\n";
+} elsif ($GRAPHS) { # Graph output...
+ print "Generating gnuplot data files:\n";
+ my $GraphNo = 0;
+ foreach $Graph (@Graphs) {
+ my @Graph = @$Graph;
+ my $Type = shift @Graph;
+ die "Only scatter graphs supported right now, not '$Type'!"
+ if ($Type ne "scatter");
+
+ my $Filename = shift @Graph;
+
+ print "Writing '$Filename'...\n";
+ open (FILE, ">$Filename") or die ("Could not open file '$Filename'!");
+
+ my ($XCol, $YCol) = @Graph;
+ foreach $Row (@Values) {
+ print FILE $$Row[$XCol] . "\t" . $$Row[$YCol] . "\n";
+ }
+ close FILE;
+ ++$GraphNo;
+ }
+
+} else {
+ # Add the header for the report to the table after sorting...
+ unshift @Values, [@Header];
+
+ #
+ # Figure out how wide each field should be...
+ #
+ my @FieldWidths = (0) x scalar(@Fields);
+ foreach $Value (@Values) {
+ for ($i = 0; $i < @$Value-1; $i++) {
+ if (length($$Value[$i]) > $FieldWidths[$i]) {
+ $FieldWidths[$i] = length($$Value[$i])
+ }
+ }
+ }
+
+ if ($LATEX) {
+ #
+ # Print out the latexified table...
+ #
+ shift @Values; # Don't print the header...
+
+ # Make sure the benchmark name field is wide enough for any aliases.
+ foreach $Name (@LatexRowMapOrder) {
+ $FieldWidths[0] = length $Name if (length($Name) > $FieldWidths[0]);
+ }
+
+ # Print out benchmarks listed in the LatexRowMapOrder
+ for ($i = 0; $i < @LatexRowMapOrder; $i += 2) {
+ my $Name = $LatexRowMapOrder[$i];
+ if ($Name eq '-') {
+ print "\\hline\n";
+ } else {
+ # Output benchmark name...
+ printf "%-$FieldWidths[0]s", $LatexRowMapOrder[$i+1];
+
+ # Find the row that this benchmark name corresponds to...
+ foreach $Row (@Values) {
+ if ($Row->[0] eq $Name) {
+ for $ColNum (@LatexColumns) {
+            # Print a separator...
+ my $Val = $Row->[$ColNum];
+ if (exists $LatexColumnFormat{$ColNum}) {
+ # If a column format routine has been specified, run it now...
+ $Val = &{$LatexColumnFormat{$ColNum}}($Val);
+ }
+
+ # Escape illegal latex characters
+ $Val =~ s/([%#])/\\$1/g;
+
+ printf " & %-$FieldWidths[$ColNum]s", $Val;
+ }
+ goto Done;
+ }
+ }
+ print "UNKNOWN Benchmark name: " . $Name;
+ Done:
+ print "\\\\\n";
+ }
+ }
+ } elsif ($CSV && scalar(@LatexRowMapOrder)) {
+ #
+ # Print out the table as csv in the row-order specified by LatexRowMapOrder
+ #
+ for ($i = 0; $i < @LatexRowMapOrder; $i += 2) {
+ my $Name = $LatexRowMapOrder[$i];
+ if ($Name eq '-') {
+ print "----\n";
+ } else {
+ # Output benchmark name.
+ printf "$LatexRowMapOrder[$i+1]";
+
+ # Find the row that this benchmark name corresponds to.
+ foreach $Row (@Values) {
+ if ($Row->[0] eq $Name) {
+ for ($j = 1; $j < @$Row-1; $j++) {
+ print ",$$Row[$j]";
+ }
+ goto Done;
+ }
+ }
+ print "UNKNOWN Benchmark name: " . $Name;
+ Done:
+ print "\\\\\n";
+ }
+ }
+
+ } elsif ($CSV) {
+ #
+ # Print out the table as csv
+ #
+ my $firstrow = 1;
+ foreach $Value (@Values) {
+ printf "$$Value[0]";
+ for ($i = 1; $i < @$Value-1; $i++) {
+ print ",$$Value[$i]" if ($$Value[$i] ne "|");
+ }
+ if ($firstrow) {
+ # Print an extra column for the header.
+ print ",$$Value[@$Value-1]";
+ $firstrow = 0;
+ }
+ print "\n";
+ }
+ } else {
+ #
+ # Print out the table in plaintext format now...
+ #
+ foreach $Value (@Values) {
+ for ($i = 0; $i < @$Value-1; $i++) {
+ printf "%-$FieldWidths[$i]s ", $$Value[$i];
+ }
+
+      # Print the assertion message if present...
+ print "$$Value[@$Value-1]\n";
+ }
+ }
+}
Propchange: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/GenerateReport.pl
------------------------------------------------------------------------------
svn:executable = *
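This stub stands in for the test-suite's real GenerateReport.pl, which the rerun path in nt.py invokes to turn a single benchmark's raw report back into the CSV that load_nt_report_file parses. A sketch of the command assembly follows; the paths are hypothetical, only the template string mirrors gen_report_template in _execute_test_again, and nt.py runs the resulting command through ['/bin/bash', '-c', gen_cmd] from the sandbox build directory:

# Sketch of the per-benchmark CSV regeneration command; the paths here are
# hypothetical and only the template mirrors _execute_test_again.
generate_report = './GenerateReport.pl'       # config.generate_report_script
schema = 'TEST.simple.report'                 # report description for this test style
raw_report = 'Output/vla.simple.report.txt'   # produced by the per-benchmark make target
csv_out = 'Output/vla.simple.report.csv'      # what load_nt_report_file consumes

gen_cmd = '{gen} -csv {schema} < {input} > {output}'.format(
    gen=generate_report, schema=schema, input=raw_report, output=csv_out)
print gen_cmd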
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile Thu Sep 18 16:51:10 2014
@@ -0,0 +1,33 @@
+# Fake test-suite Makefile for rerun tests.
+#
+# Support the targets needed for doing a rerun: the original report target,
+# plus the per-benchmark report targets that the reruns rebuild.
+
+include Makefile.config
+
+MS_STRUCT := ms_struct-bitfield
+STRUCT_LAYOUT := ms_struct_pack_layout-1
+VLA := vla
+
+ALL_BENCHES = Output/$(MS_STRUCT).simple.report.txt \
+ Output/$(STRUCT_LAYOUT).simple.report.txt \
+ Output/$(VLA).simple.report.txt
+
+tools:
+ @echo "This is a fake tools build."
+
+report: $(ALL_BENCHES)
+ echo "AB: $(ALL_BENCHES)"
+ @echo "This is a fake report build too."
+ touch report.simple.txt
+ touch report.simple.raw.out
+
+.PHONY: report
+
+report.simple.csv: report
+ cp ${PROJ_SRC_ROOT}/fake-report.simple.csv $@
+
+Output/%.simple.report.txt:
+ mkdir -p Output
+ touch Output/$*.out-simple
+ cp ${PROJ_SRC_ROOT}/$@ $@
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile.config
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile.config?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile.config (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Makefile.config Thu Sep 18 16:51:10 2014
@@ -0,0 +1 @@
+PROJ_SRC_ROOT = "."
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct-bitfield.simple.report.txt
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct-bitfield.simple.report.txt?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct-bitfield.simple.report.txt (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct-bitfield.simple.report.txt Thu Sep 18 16:51:10 2014
@@ -0,0 +1,13 @@
+---------------------------------------------------------------
+>>> ========= '/private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout/ms_struct-bitfield' Program
+---------------------------------------------------------------
+
+TEST-PASS: compile /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout/ms_struct-bitfield
+TEST-RESULT-compile-success: pass
+TEST-RESULT-compile-time: user 0.7001
+TEST-RESULT-compile-real-time: real 0.8219
+
+TEST-PASS: exec /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout/ms_struct
+TEST-RESULT-exec-success: pass
+TEST-RESULT-exec-time: user 0.9900
+TEST-RESULT-exec-real-time: real 1.0802
\ No newline at end of file
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct_pack_layout-1.simple.report.txt
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct_pack_layout-1.simple.report.txt?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct_pack_layout-1.simple.report.txt (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/ms_struct_pack_layout-1.simple.report.txt Thu Sep 18 16:51:10 2014
@@ -0,0 +1,13 @@
+---------------------------------------------------------------
+>>> ========= '/private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout/ms_struct_pack_layout-1' Program
+---------------------------------------------------------------
+
+TEST-PASS: compile /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout/ms_struct_pack_layout-1
+TEST-RESULT-compile-success: pass
+TEST-RESULT-compile-time: user 0.7001
+TEST-RESULT-compile-real-time: real 0.8219
+
+TEST-PASS: exec /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout/ms_struct_pack_layout-1
+TEST-RESULT-exec-success: pass
+TEST-RESULT-exec-time: user 0.9900
+TEST-RESULT-exec-real-time: real 1.0802
\ No newline at end of file
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/vla.simple.report.txt
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/vla.simple.report.txt?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/vla.simple.report.txt (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/Output/vla.simple.report.txt Thu Sep 18 16:51:10 2014
@@ -0,0 +1,13 @@
+---------------------------------------------------------------
+>>> ========= '/private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout-C++/vla' Program
+---------------------------------------------------------------
+
+TEST-PASS: compile /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout-C++/vla
+TEST-RESULT-compile-success: pass
+TEST-RESULT-compile-time: user 0.7001
+TEST-RESULT-compile-real-time: real 0.8219
+
+TEST-PASS: exec /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout-C++/vla
+TEST-RESULT-exec-success: pass
+TEST-RESULT-exec-time: user 0.9900
+TEST-RESULT-exec-real-time: real 1.0802
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/README.txt
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/README.txt?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/README.txt (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/README.txt Thu Sep 18 16:51:10 2014
@@ -0,0 +1,2 @@
+This is a dummy set of LLVM test-suite sources, just intended for use with
+testing the 'lnt runtest nt' module.
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/TEST.simple.report
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/TEST.simple.report?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/TEST.simple.report (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/TEST.simple.report Thu Sep 18 16:51:10 2014
@@ -0,0 +1,32 @@
+##=== TEST.nightly.report - Report description for nightly -----*- perl -*-===##
+#
+# This file defines a report to be generated for the nightly tests.
+#
+##===----------------------------------------------------------------------===##
+
+# Sort by program name
+$SortCol = 0;
+$TrimRepeatedPrefix = 1;
+
+my $WallTimeRE = "Time: ([0-9.]+) seconds \\([0-9.]+ wall clock";
+
+# FormatTime - Convert a time from 1m23.45 into 83.45
+sub FormatTime {
+ my $Time = shift;
+ if ($Time =~ m/([0-9]+)[m:]([0-9.]+)/) {
+ return sprintf("%7.4f", $1*60.0+$2);
+ }
+
+ return sprintf("%7.4f", $Time);
+}
+
+(
+ ["Program" , '\'([^\']+)\' Program'],
+ [],
+ ["CC" , 'TEST-RESULT-compile-success: (pass|fail|xfail)'],
+ ["CC_Time" , 'TEST-RESULT-compile-time: user\s*([.0-9m:]+)', \&FormatTime],
+ ["CC_Real_Time", 'TEST-RESULT-compile-real-time: real\s*([.0-9m:]+)', \&FormatTime],
+ ["Exec" , 'TEST-RESULT-exec-success: (pass|fail|xfail)'],
+ ["Exec_Time", 'TEST-RESULT-exec-time: user\s*([.0-9m:]+)', \&FormatTime],
+ ["Exec_Real_Time", 'TEST-RESULT-exec-real-time: real\s*([.0-9m:]+)', \&FormatTime],
+);
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/configure
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/configure?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/configure (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/configure Thu Sep 18 16:51:10 2014
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+SRC_PATH=$(dirname $0)
+
+echo "This is a fake configure script."
+
+echo "Copying in Makefile..."
+cp $SRC_PATH/Makefile .
+cp $SRC_PATH/subtest/Makefile ./subtest/
+
+echo "Creating Makefile.config..."
+echo "PROJ_SRC_ROOT = \"${SRC_PATH}\"" > Makefile.config
Propchange: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/configure
------------------------------------------------------------------------------
svn:executable = *
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/fake-report.simple.csv
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/fake-report.simple.csv?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/fake-report.simple.csv (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/fake-report.simple.csv Thu Sep 18 16:51:10 2014
@@ -0,0 +1,70 @@
+Program,CC,CC_Time,Exec,Exec_Time
+AtomicOps,pass, 0.0076,pass, 0.0003
+DefaultInitDynArrays,pass, 0.0159,pass, 0.0003
+FloatPrecision,pass, 0.0078,pass, 0.0003
+ObjC++/Hello,pass, 0.3348,pass, 0.0032
+ObjC++/property-reference,pass, 0.4295,pass, 0.0060
+ObjC++/property-reference-object,pass, 0.3569,pass, 0.0031
+ObjC++/reference-in-block-args,pass, 0.0127,pass, 0.0030
+ObjC/bitfield-access,pass, 0.0171,pass, 0.0030
+ObjC/bitfield-access-2,pass, 0.0171,pass, 0.0030
+ObjC/block-byref-aggr,pass, 0.3288,pass, 0.0030
+ObjC/constant-strings,pass, 0.0072,pass, 0.0031
+ObjC/dot-syntax,pass, 0.0223,pass, 0.0030
+ObjC/dot-syntax-1,pass, 0.0264,pass, 0.0031
+ObjC/dot-syntax-2,pass, 0.0136,pass, 0.0032
+ObjC/exceptions,pass, 0.2270,pass, 0.0032
+ObjC/exceptions-2,pass, 0.2062,pass, 0.0030
+ObjC/exceptions-3,pass, 0.2097,pass, 0.0031
+ObjC/exceptions-4,pass, 0.2103,pass, 0.0048
+ObjC/for-in,pass, 0.2147,pass, 0.0034
+ObjC/instance-method-metadata,pass, 0.2126,pass, 0.0030
+ObjC/messages,pass, 0.0193,pass, 0.0030
+ObjC/messages-2,pass, 0.0356,pass, 0.0030
+ObjC/parameter-passing,pass, 0.2268,pass, 0.0031
+ObjC/predefined-expr-in-method,pass, 0.0115,pass, 0.0031
+ObjC/property,pass, 0.2239,pass, 0.0031
+ObjC/protocols,pass, 0.0193,pass, 0.0030
+ObjC/synchronized,pass, 0.2094,pass, 0.1217
+ObjC/trivial-interface,pass, 0.2071,pass, 0.0031
+SignlessTypes/Large/cast,pass, 0.0314,pass, 0.0087
+SignlessTypes/cast-bug,pass, 0.0061,pass, 0.0003
+SignlessTypes/cast2,pass, 0.0085,pass, 0.0003
+SignlessTypes/ccc,pass, 0.0160,pass, 0.0003
+SignlessTypes/div,pass, 0.0139,pass, 0.0003
+SignlessTypes/factor,pass, 0.0169,pass, 0.0003
+SignlessTypes/rem,pass, 0.0599,pass, 0.0009
+SignlessTypes/shr,pass, 0.0139,pass, 0.0003
+StructModifyTest,pass, 0.0062,pass, 0.0003
+TestLoop,pass, 0.0088,pass, 0.0003
+Vector/SSE/sse.expandfft,pass, 0.0652,pass, 0.2459
+Vector/SSE/sse.isamax,pass, 0.0388,pass, 0.0003
+Vector/SSE/sse.shift,pass, 0.0217,pass, 0.0003
+Vector/SSE/sse.stepfft,pass, 0.0524,pass, 0.3313
+Vector/build,pass, 0.0121,pass, 0.0003
+Vector/build2,pass, 0.0159,pass, 1.1560
+Vector/divides,pass, 0.0090,pass, 0.0003
+Vector/multiplies,pass, 0.0169,pass, 1.8812
+Vector/simple,pass, 0.0134,pass, 0.0003
+Vector/sumarray,pass, 0.0099,pass, 0.0003
+Vector/sumarray-dbl,pass, 0.0107,pass, 0.0003
+block-byref-cxxobj-test,pass, 0.0148,pass, 0.0003
+block-byref-test,pass, 0.0080,pass, 0.0003
+block-call-r7674133,pass, 0.0072,pass, 0.0003
+block-copied-in-cxxobj,pass, 0.0186,pass, 0.0003
+block-copied-in-cxxobj-1,pass, 0.0165,pass, 0.0003
+blockstret,pass, 0.0089,pass, 0.0003
+byval-alignment,pass, 0.0079,pass, 0.0003
+conditional-gnu-ext,pass, 0.0066,pass, 0.0003
+conditional-gnu-ext-cxx,pass, 0.0082,pass, 0.0003
+initp1,*, 0.0240,*, 0.0003
+member-function-pointers,pass, 0.0120,pass, 0.0003
+ms_struct-bitfield,pass, 0.0053,pass, 200.0003
+ms_struct-bitfield-1,pass, 0.0049,pass, 0.0003
+ms_struct-bitfield-init,pass, 0.0100,pass, 0.0003
+ms_struct-bitfield-init-1,pass, 0.0119,pass, 0.0003
+ms_struct_pack_layout,pass, 0.0111,pass, 0.0003
+ms_struct_pack_layout-1,pass, 0.0046,pass, 200.0003
+printargs,pass, 0.0085,*,0.02
+stmtexpr,pass, 0.0090,pass, 0.0003
+vla,pass, 0.0194,pass, 200.0003
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/subtest/Makefile
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/subtest/Makefile?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/subtest/Makefile (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/subtest/Makefile Thu Sep 18 16:51:10 2014
@@ -0,0 +1,13 @@
+# Fake makefile
+
+include ../Makefile.config
+
+tools:
+ echo "This is a fake tools build."
+
+report:
+ echo "This is a fake report build."
+.PHONY: report
+
+report.simple.csv: report
+ cp ${PROJ_SRC_ROOT}/fake-report.simple.csv $@
Added: lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/vla.simple.report.txt
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/vla.simple.report.txt?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/vla.simple.report.txt (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun-test-suite2/vla.simple.report.txt Thu Sep 18 16:51:10 2014
@@ -0,0 +1,13 @@
+---------------------------------------------------------------
+>>> ========= '/private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout-C++/vla' Program
+---------------------------------------------------------------
+
+TEST-PASS: compile /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout-C++/vla
+TEST-RESULT-compile-success: pass
+TEST-RESULT-compile-time: user 0.7001
+TEST-RESULT-compile-real-time: real 0.8219
+
+TEST-PASS: exec /private/tmp/lnt_test_26297-2/test-2014-09-09_22-12-54/SingleSource/Benchmarks/Shootout-C++/vla
+TEST-RESULT-exec-success: pass
+TEST-RESULT-exec-time: user 0.9900
+TEST-RESULT-exec-real-time: real 1.0802
Added: lnt/trunk/tests/runtest/Inputs/rerun_server_instance/data/lnt.db
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun_server_instance/data/lnt.db?rev=218080&view=auto
==============================================================================
Binary files lnt/trunk/tests/runtest/Inputs/rerun_server_instance/data/lnt.db (added) and lnt/trunk/tests/runtest/Inputs/rerun_server_instance/data/lnt.db Thu Sep 18 16:51:10 2014 differ
Added: lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.cfg
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.cfg?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.cfg (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.cfg Thu Sep 18 16:51:10 2014
@@ -0,0 +1,50 @@
+# LNT (aka Zorg) configuration file
+#
+# Paths are resolved relative to this file.
+
+# The configuration file version.
+config_version = (0, 1, 0)
+
+# Name to use for this installation. This appears in web page headers, for
+# example.
+name = 'LNT'
+
+# Path to the LNT server. This is required for use in emails where we need to
+# provide an absolute URL to the server.
+zorgURL = 'http://ozzy-2.local/perf'
+
+# Temporary directory, for use by the web app. This must be writable by the user
+# the web app runs as.
+tmp_dir = 'lnt_tmp'
+
+# Database directory, for easily rerooting the entire set of databases. Database
+# paths are resolved relative to the config path + this path.
+db_dir = 'data'
+
+# The list of available databases, and their properties. At a minimum, there
+# should be a 'default' entry for the default database.
+databases = {
+ 'default' : { 'path' : 'lnt.db',
+ 'db_version' : '0.4' },
+ }
+
+# The LNT email configuration.
+#
+# The 'to' field can be either a single email address, or a list of
+# (regular-expression, address) pairs. In the latter form, the machine name of
+# the submitted results is matched against the regular expressions to determine
+# which email address to use for the results.
+nt_emailer = {
+ 'enabled' : False,
+ 'host' : None,
+ 'from' : None,
+
+ # This is a list of (filter-regexp, address) pairs -- it is evaluated in
+ # order based on the machine name. This can be used to dispatch different
+ # reports to different email address.
+ 'to' : [(".*", None)],
+ }
+
+# Enable automatic restart using the wsgi_restart module; this should be off in
+# a production environment.
+wsgi_restart = False
Added: lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.wsgi
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.wsgi?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.wsgi (added)
+++ lnt/trunk/tests/runtest/Inputs/rerun_server_instance/lnt.wsgi Thu Sep 18 16:51:10 2014
@@ -0,0 +1,11 @@
+#!/venv/lnt-v0.4/bin/python
+# -*- Python -*-
+
+import lnt.server.ui.app
+
+application = lnt.server.ui.app.App.create_standalone(
+ '/Users/ddunbar/lnt/tests/server/db/migrate/Inputs/lnt_v0.4.0_filled_instance/lnt.cfg')
+
+if __name__ == "__main__":
+ import werkzeug
+ werkzeug.run_simple('localhost', 8000, application)
Added: lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh (added)
+++ lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh Thu Sep 18 16:51:10 2014
@@ -0,0 +1,45 @@
+#!/bin/bash
+# This script wraps a call to lnt runtest with a local server
+# instance. It is intended for testing full runtest invocations
+# that need a real server instnace to work.
+# ./runtest_server_wrapper <location of server files> <runtest type> [arguments for lnt runtest]
+# ./runtest_server_wrapper /tmp/ nt --cc /bin/clang --sandbox /tmp/sandbox
+
+# First launch the server.
+# TODO: we can't run the tests in parallel unless we do something smart
+# with this port.
+
+PROGRAM="$(basename $0)"
+
+usage() {
+ echo "usage: $PROGRAM <location of server files> <runtest type> [arguments for lnt runtest]"
+ echo "e.g: $PROGRAM /tmp/ nt --cc /bin/clang --sandbox /tmp/sandbox"
+}
+
+error() {
+ echo "error: $PROGRAM: $*" >&2
+ usage >&2
+ exit 1
+}
+
+main() {
+ [ $# -lt 2 ] &&
+ error "not enough arguments"
+
+ lnt runserver $1 --hostname localhost --port 9090 &
+ local pid=$!
+ local type=$2
+ shift 2
+ lnt runtest $type --submit http://localhost:9090/db_default/submitRun $@
+ local rc=$?
+
+ kill -15 $pid
+ local kill_rc=$?
+ [ $kill_rc -ne 0 ] &&
+        error "failed to stop the local server: kill exited with $kill_rc"
+
+ wait $pid
+ exit $rc
+}
+
+main "$@"
\ No newline at end of file
Propchange: lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh
------------------------------------------------------------------------------
svn:executable = *
Added: lnt/trunk/tests/runtest/rerun.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/rerun.py?rev=218080&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/rerun.py (added)
+++ lnt/trunk/tests/runtest/rerun.py Thu Sep 18 16:51:10 2014
@@ -0,0 +1,61 @@
+# Testing for the rerun feature of LNT nt.
+# This test runs two stub test suites. The second one has different values for
+# some of the tests, so they should be marked as regressions, and reruns should
+# be triggered.
+
+# RUN: cp -a %S/Inputs/rerun_server_instance %S/Output/server
+# RUN: rm -f CHECK-STDOUT CHECK-STDOUT2 CHECK-STDERR CHECK-STDERR2
+# RUN: %S/Inputs/runtest_server_wrapper.sh %S/Output/server nt \
+# RUN: --sandbox %t.SANDBOX \
+# RUN: --test-suite %S/Inputs/rerun-test-suite1 \
+# RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \
+# RUN: --no-timestamp --rerun --run-order 1 > %t.log 2> %t.err
+# RUN: FileCheck --check-prefix CHECK-STDOUT < %t.log %s
+# RUN: FileCheck --check-prefix CHECK-STDERR < %t.err %s
+
+# CHECK-STDOUT: Import succeeded.
+# CHECK-STDOUT: Added Runs : 1
+# CHECK-STDOUT: --- Tested: 138 tests --
+
+# CHECK-STDERR: inferred C++ compiler under test
+# CHECK-STDERR: checking source versions
+# CHECK-STDERR: using nickname
+# CHECK-STDERR: starting test
+# CHECK-STDERR: configuring
+# CHECK-STDERR: building test-suite tools
+# CHECK-STDERR: executing "nightly tests" with -j1
+# CHECK-STDERR: loading nightly test data
+# CHECK-STDERR: capturing machine information
+# CHECK-STDERR: generating report
+# CHECK-STDERR: submitting result to
+# CHECK-STDERR: note: Rerunning 0 of 69 benchmarks.
+
+# RUN: %S/Inputs/runtest_server_wrapper.sh %S/Output/server nt \
+# RUN: --sandbox %t.SANDBOX2 \
+# RUN: --test-suite %S/Inputs/rerun-test-suite2 \
+# RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \
+# RUN: --no-timestamp --rerun --run-order 4 --verbose > %t.2.log 2> %t.2.err || cat %t.2.err
+# RUN: echo "Run 2"
+# RUN: FileCheck --check-prefix CHECK-STDOUT2 < %t.2.log %s
+# RUN: FileCheck --check-prefix CHECK-STDERR2 < %t.2.err %s
+
+# CHECK-STDOUT2: Import succeeded.
+# CHECK-STDOUT2: Added Runs : 1
+# CHECK-STDOUT2: --- Tested: 138 tests --
+
+# CHECK-STDERR2: inferred C++ compiler under test
+# CHECK-STDERR2: checking source versions
+# CHECK-STDERR2: using nickname
+# CHECK-STDERR2: starting test
+# CHECK-STDERR2: configuring
+# CHECK-STDERR2: building test-suite tools
+# CHECK-STDERR2: executing "nightly tests" with -j1
+# CHECK-STDERR2: loading nightly test data
+# CHECK-STDERR2: capturing machine information
+# CHECK-STDERR2: generating report
+# CHECK-STDERR2: note: Rerunning 3 of 69 benchmarks.
+# CHECK-STDERR2: note: Rerunning: ms_struct-bitfield [1/3]
+# CHECK-STDERR2: note: Rerunning: ms_struct_pack_layout-1 [2/3]
+# CHECK-STDERR2: note: Rerunning: vla [3/3]
+
+# CHECK-STDERR2: submitting result to