[llvm] r299605 - [lit] Implement timeouts and max_time for process pool testing
Reid Kleckner via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 5 17:38:29 PDT 2017
Author: rnk
Date: Wed Apr 5 19:38:28 2017
New Revision: 299605
URL: http://llvm.org/viewvc/llvm-project?rev=299605&view=rev
Log:
[lit] Implement timeouts and max_time for process pool testing
This is necessary to pass the lit test suite at llvm/utils/lit/tests.
That suite has some pre-existing failures, but with this change, switching
to pools no longer regresses any tests.
I had to change test-data/lit.cfg to import DummyFormat from a real module
to fix pickling problems, but I think it'll be OK if we require test
formats to be written in real .py modules outside of lit.cfg files.
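(Background on the pickling problem: multiprocessing sends task arguments
to workers by pickling them, and pickle stores a class by reference to its
defining module and name so the worker can re-import it. A class defined
inside lit.cfg is created by exec() of the config text and has no
importable module, so unpickling fails in the worker. A minimal,
hypothetical repro of the failure mode, not lit code:

    import pickle

    # lit executes lit.cfg roughly like this: the class ends up in a
    # plain dict rather than in an importable module, so its
    # __module__ attribute is meaningless ('builtins').
    namespace = {}
    exec("class DummyFormat(object):\n    pass\n", namespace)
    obj = namespace['DummyFormat']()

    try:
        pickle.dumps(obj)
    except pickle.PicklingError as err:
        # e.g. "Can't pickle <class 'builtins.DummyFormat'>:
        # attribute lookup DummyFormat on builtins failed"
        print(err)

Moving the class into dummy_format.py gives pickle a real module path to
record.)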
I also discovered that in some circumstances AsyncResult.wait() will not
raise KeyboardInterrupt in a timely manner. Passing a non-zero timeout
works around this: it makes threading.Condition.wait use a polling loop
that runs through the interpreter, so it is able to raise
KeyboardInterrupt asynchronously.
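As a standalone illustration of that workaround (a hypothetical sketch,
not code from this commit):

    import multiprocessing
    import time

    def slow_task(n):
        time.sleep(n)
        return n

    if __name__ == '__main__':
        pool = multiprocessing.Pool(2)
        result = pool.apply_async(slow_task, (60,))
        try:
            # result.wait() with no timeout may leave a pending Ctrl-C
            # undelivered until the wait returns; polling with a short
            # timeout keeps the main thread responsive to
            # KeyboardInterrupt.
            while not result.ready():
                result.wait(1)
        finally:
            pool.terminate()
            pool.join()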
Added:
llvm/trunk/utils/lit/tests/Inputs/test-data/dummy_format.py
Modified:
llvm/trunk/utils/lit/lit/run.py
llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg
Modified: llvm/trunk/utils/lit/lit/run.py
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/lit/run.py?rev=299605&r1=299604&r2=299605&view=diff
==============================================================================
--- llvm/trunk/utils/lit/lit/run.py (original)
+++ llvm/trunk/utils/lit/lit/run.py Wed Apr 5 19:38:28 2017
@@ -347,15 +347,6 @@ class Run(object):
{k: multiprocessing.Semaphore(v) for k, v in
self.lit_config.parallelism_groups.items()}
- # Save the display object on the runner so that we can update it from
- # our task completion callback.
- self.display = display
-
- # Start a process pool. Copy over the data shared between all test runs.
- pool = multiprocessing.Pool(jobs, worker_initializer,
- (self.lit_config,
- self.parallelism_semaphores))
-
# Install a console-control signal handler on Windows.
if win32api is not None:
def console_ctrl_handler(type):
@@ -366,10 +357,24 @@ class Run(object):
return True
win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
- # FIXME: Implement max_time using .wait() timeout argument and a
- # deadline.
+ # Save the display object on the runner so that we can update it from
+ # our task completion callback.
+ self.display = display
+
+ # We need to issue many wait calls, so compute the final deadline and
+ # subtract time.time() from that as we go along.
+ deadline = None
+ if max_time:
+ deadline = time.time() + max_time
+
+ # Start a process pool. Copy over the data shared between all test runs.
+ pool = multiprocessing.Pool(jobs, worker_initializer,
+ (self.lit_config,
+ self.parallelism_semaphores))
try:
+ self.failure_count = 0
+ self.hit_max_failures = False
async_results = [pool.apply_async(worker_run_one_test,
args=(test_index, test),
callback=self.consume_test_result)
@@ -378,10 +383,21 @@ class Run(object):
# Wait for all results to come in. The callback that runs in the
# parent process will update the display.
for a in async_results:
- a.wait()
+ if deadline:
+ a.wait(deadline - time.time())
+ else:
+ # Python condition variables cannot be interrupted unless
+ # they have a timeout. This can make lit unresponsive to
+ # KeyboardInterrupt, so do a busy wait with a timeout.
+ while not a.ready():
+ a.wait(1)
if not a.successful():
a.get() # Exceptions raised here come from the worker.
+ if self.hit_max_failures:
+ break
finally:
+ # Stop the workers and wait for any straggling results to come in
+ # if we exited without waiting on every async result.
pool.terminate()
pool.join()
@@ -398,6 +414,12 @@ class Run(object):
up the original test object. Also updates the progress bar as tasks
complete.
"""
+ # Don't add any more test results after we've hit the maximum failure
+ # count. Otherwise we're racing with the main thread, which is going
+ # to terminate the process pool soon.
+ if self.hit_max_failures:
+ return
+
(test_index, test_with_result) = pool_result
# Update the parent process copy of the test. This includes the result,
# XFAILS, REQUIRES, and UNSUPPORTED statuses.
@@ -406,6 +428,13 @@ class Run(object):
self.tests[test_index] = test_with_result
self.display.update(test_with_result)
+ # If we've finished all the tests or too many tests have failed, notify
+ # the main thread that we've stopped testing.
+ self.failure_count += (test_with_result.result.code == lit.Test.FAIL)
+ if self.lit_config.maxFailures and \
+ self.failure_count == self.lit_config.maxFailures:
+ self.hit_max_failures = True
+
child_lit_config = None
child_parallelism_semaphores = None
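The waiting logic added above boils down to the following pattern (a
sketch with a made-up wait_all helper; lit's real loop also updates the
progress display and checks hit_max_failures):

    import time

    def wait_all(async_results, max_time=None):
        # Compute one absolute deadline up front; each wait then gets
        # only the time remaining, so the total across all results
        # cannot exceed max_time.
        deadline = time.time() + max_time if max_time else None
        for a in async_results:
            if deadline is not None:
                a.wait(deadline - time.time())
            else:
                # No deadline: poll with a 1-second timeout so that
                # KeyboardInterrupt is still delivered promptly.
                while not a.ready():
                    a.wait(1)
            # successful() raises if the result is not ready (e.g. the
            # deadline expired), so only consult completed results.
            if a.ready() and not a.successful():
                a.get()  # re-raises the worker's exception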
Added: llvm/trunk/utils/lit/tests/Inputs/test-data/dummy_format.py
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/tests/Inputs/test-data/dummy_format.py?rev=299605&view=auto
==============================================================================
--- llvm/trunk/utils/lit/tests/Inputs/test-data/dummy_format.py (added)
+++ llvm/trunk/utils/lit/tests/Inputs/test-data/dummy_format.py Wed Apr 5 19:38:28 2017
@@ -0,0 +1,38 @@
+import os
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+ def execute(self, test, lit_config):
+ # In this dummy format, expect that each test file is actually just a
+ # .ini format dump of the results to report.
+
+ source_path = test.getSourcePath()
+
+ cfg = ConfigParser.ConfigParser()
+ cfg.read(source_path)
+
+ # Create the basic test result.
+ result_code = cfg.get('global', 'result_code')
+ result_output = cfg.get('global', 'result_output')
+ result = lit.Test.Result(getattr(lit.Test, result_code),
+ result_output)
+
+ # Load additional metrics.
+ for key,value_str in cfg.items('results'):
+ value = eval(value_str)
+ if isinstance(value, int):
+ metric = lit.Test.IntMetricValue(value)
+ elif isinstance(value, float):
+ metric = lit.Test.RealMetricValue(value)
+ else:
+ raise RuntimeError("unsupported result type")
+ result.addMetric(key, metric)
+
+ return result
+
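For reference, a test input consumed by this format would be an .ini file
along these lines (hypothetical contents; the actual test-data inputs are
not part of this diff):

    [global]
    result_code = PASS
    result_output = Test passed.

    [results]
    value0 = 1
    value1 = 2.3456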
Modified: llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg?rev=299605&r1=299604&r2=299605&view=diff
==============================================================================
--- llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg (original)
+++ llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg Wed Apr 5 19:38:28 2017
@@ -1,44 +1,10 @@
-import os
-try:
- import ConfigParser
-except ImportError:
- import configparser as ConfigParser
-
-import lit.formats
-import lit.Test
-
-class DummyFormat(lit.formats.FileBasedTest):
- def execute(self, test, lit_config):
- # In this dummy format, expect that each test file is actually just a
- # .ini format dump of the results to report.
-
- source_path = test.getSourcePath()
-
- cfg = ConfigParser.ConfigParser()
- cfg.read(source_path)
-
- # Create the basic test result.
- result_code = cfg.get('global', 'result_code')
- result_output = cfg.get('global', 'result_output')
- result = lit.Test.Result(getattr(lit.Test, result_code),
- result_output)
-
- # Load additional metrics.
- for key,value_str in cfg.items('results'):
- value = eval(value_str)
- if isinstance(value, int):
- metric = lit.Test.IntMetricValue(value)
- elif isinstance(value, float):
- metric = lit.Test.RealMetricValue(value)
- else:
- raise RuntimeError("unsupported result type")
- result.addMetric(key, metric)
-
- return result
+import site
+site.addsitedir(os.path.dirname(__file__))
+import dummy_format
config.name = 'test-data'
config.suffixes = ['.ini']
-config.test_format = DummyFormat()
+config.test_format = dummy_format.DummyFormat()
config.test_source_root = None
config.test_exec_root = None
config.target_triple = None