diff --git a/apex_launchtest/apex_launchtest/__init__.py b/apex_launchtest/apex_launchtest/__init__.py
index d6b60bd..99adf38 100644
--- a/apex_launchtest/apex_launchtest/__init__.py
+++ b/apex_launchtest/apex_launchtest/__init__.py
@@ -15,11 +15,13 @@
 from .decorator import post_shutdown_test
 from .io_handler import ActiveIoHandler, IoHandler
+from .parametrize import parametrize
 from .proc_info_handler import ActiveProcInfoHandler, ProcInfoHandler
 from .ready_aggregator import ReadyAggregator
 
 __all__ = [
     # Functions
+    'parametrize',
     'post_shutdown_test',
 
     # Classes
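The newly exported parametrize decorator is meant to wrap a test file's generate_test_description function. A minimal usage sketch follows; the argument name 'my_arg' and the launch description contents are illustrative, not part of this changeset:

import apex_launchtest
import launch
import launch.actions


@apex_launchtest.parametrize('my_arg', ['a', 'b'])
def generate_test_description(my_arg, ready_fn):
    # Loading this module produces one TestRun per parameter value ('a' and 'b')
    return launch.LaunchDescription([
        launch.actions.OpaqueFunction(function=lambda context: ready_fn()),
    ])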
diff --git a/apex_launchtest/apex_launchtest/apex_launchtest_main.py b/apex_launchtest/apex_launchtest/apex_launchtest_main.py
index fd3851a..0d4ee64 100644
--- a/apex_launchtest/apex_launchtest/apex_launchtest_main.py
+++ b/apex_launchtest/apex_launchtest/apex_launchtest_main.py
@@ -21,6 +21,7 @@
 from .apex_runner import ApexRunner
 from .domain_coordinator import get_coordinated_domain_id
 from .junitxml import unittestResultsToXml
+from .loader import LoadTestsFromPythonModule
 from .print_arguments import print_arguments_of_launch_description
 
 _logger_ = logging.getLogger(__name__)
@@ -29,7 +30,10 @@
 
 def _load_python_file_as_module(python_file_path):
     """Load a given Python launch file (by path) as a Python module."""
     # Taken from apex_core to not introduce a weird dependency thing
-    loader = SourceFileLoader('python_launch_file', python_file_path)
+    loader = SourceFileLoader(
+        os.path.basename(python_file_path),
+        python_file_path
+    )
     return loader.load_module()
 
@@ -99,12 +103,13 @@ def apex_launchtest_main():
             "Test file '{}' is missing generate_test_description function".format(args.test_file)
         )
 
-    dut_test_description_func = test_module.generate_test_description
-    _logger_.debug('Checking generate_test_description function signature')
+    # This is a list of TestRun objects.  Each run corresponds to one launch.  There may be
+    # multiple runs if the launch is parametrized
+    test_runs = LoadTestsFromPythonModule(test_module)
 
+    # The runner handles sequencing the launches
     runner = ApexRunner(
-        gen_launch_description_fn=dut_test_description_func,
-        test_module=test_module,
+        test_runs=test_runs,
         launch_file_arguments=args.launch_arguments,
         debug=args.verbose
     )
@@ -116,30 +121,25 @@ def apex_launchtest_main():
         parser.error(e)
 
     if args.show_args:
+        # TODO pete: Handle the case where different launch descriptions take different args?
         print_arguments_of_launch_description(
-            launch_description=runner.get_launch_description()
+            launch_description=test_runs[0].get_launch_description()
         )
         sys.exit(0)
 
     _logger_.debug('Running integration test')
     try:
-        result, postcheck_result = runner.run()
+        results = runner.run()
         _logger_.debug('Done running integration test')
 
         if args.xmlpath:
-            xml_report = unittestResultsToXml(
-                test_results={
-                    'active_tests': result,
-                    'after_shutdown_tests': postcheck_result
-                }
-            )
+            xml_report = unittestResultsToXml(test_results=results)
            xml_report.write(args.xmlpath, xml_declaration=True)
 
-        if not result.wasSuccessful():
-            sys.exit(1)
-
-        if not postcheck_result.wasSuccessful():
-            sys.exit(1)
+        # There will be one result for every test run (see above where we load the tests)
+        for result in results.values():
+            if not result.wasSuccessful():
+                sys.exit(1)
 
     except Exception as e:
         import traceback
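Why the SourceFileLoader change matters: the loaded module's __name__ now carries the test file's base name instead of the fixed string 'python_launch_file', and TestRun.__str__ (in loader.py below) uses that name to label each run. A standalone sketch, with a hypothetical file path for illustration only:

import os
from importlib.machinery import SourceFileLoader

python_file_path = '/tmp/good_proc.test.py'  # hypothetical path, not from this changeset
loader = SourceFileLoader(os.path.basename(python_file_path), python_file_path)
# A module loaded this way would have __name__ == 'good_proc.test.py', so a test
# run built from it stringifies as 'good_proc.test.py' plus any parameter suffix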
diff --git a/apex_launchtest/apex_launchtest/apex_runner.py b/apex_launchtest/apex_launchtest/apex_runner.py
index d70fe0f..9335cc2 100644
--- a/apex_launchtest/apex_launchtest/apex_runner.py
+++ b/apex_launchtest/apex_launchtest/apex_runner.py
@@ -24,43 +24,22 @@
 from launch.event_handlers import OnProcessIO
 
 from .io_handler import ActiveIoHandler
-from .loader import PostShutdownTestLoader, PreShutdownTestLoader
 from .parse_arguments import parse_launch_arguments
 from .proc_info_handler import ActiveProcInfoHandler
 from .test_result import FailResult, TestResult
 
 
-def _normalize_ld(launch_description_fn):
-    # A launch description fn can return just a launch description, or a tuple of
-    # (launch_description, test_context).  This wrapper function normalizes things
-    # so we always get a tuple, sometimes with an empty dictionary for the test_context
-    def wrapper(*args, **kwargs):
-        result = launch_description_fn(*args, **kwargs)
-        if isinstance(result, tuple):
-            return result
-        else:
-            return result, {}
+class _LaunchDiedException(Exception):
+    pass
 
-    return wrapper
-
 
-class ApexRunner(object):
+class _RunnerWorker():
 
     def __init__(self,
-                 gen_launch_description_fn,
-                 test_module,
+                 test_run,
                  launch_file_arguments=[],
                  debug=False):
-        """
-        Create an ApexRunner object.
-
-        :param callable gen_launch_description_fn: A function that returns a ros2 LaunchDesription
-        for launching the processes under test.  This function should take a callable as a
-        parameter which will be called when the processes under test are ready for the test to
-        start
-        """
-        self._gen_launch_description_fn = gen_launch_description_fn
-        self._test_module = test_module
+        self._test_run = test_run
         self._launch_service = LaunchService(debug=debug)
         self._processes_launched = threading.Event()  # To signal when all processes started
         self._tests_completed = threading.Event()  # To signal when all the tests have finished
@@ -68,16 +47,19 @@ def __init__(self,
 
         # Can't run LaunchService.run on another thread :-(
         # See https://github.com/ros2/launch/issues/126
-        # Instead, we'll let the tests run on another thread
+        #
+        # It would be simpler if we could run the pre-shutdown tests and the post-shutdown
+        # tests on one thread, and run the launch on another thread.
+        #
+        # Instead, we'll run the pre-shutdown tests on a background thread concurrent with the
+        # launch on the main thread.  Once the launch is stopped, we'll run the post-shutdown
+        # tests on the main thread
         self._test_tr = threading.Thread(
             target=self._run_test,
             name='test_runner_thread',
             daemon=True
         )
 
-    def get_launch_description(self):
-        return _normalize_ld(self._gen_launch_description_fn)(lambda: None)[0]
-
     def run(self):
         """
         Launch the processes under test and run the tests.
@@ -85,18 +67,54 @@ def run(self):
         :return: A tuple of two unittest.Results - one for tests that ran while processes were
         active, and another set for tests that ran after processes were shutdown
         """
-        test_ld, test_context = _normalize_ld(
-            self._gen_launch_description_fn
-        )(lambda: self._processes_launched.set())
-
-        # Data to squirrel away for post-shutdown tests
-        self.proc_info = ActiveProcInfoHandler()
-        self.proc_output = ActiveIoHandler()
-        self.test_context = test_context
+        test_ld, test_context = self._test_run.normalized_test_description(
+            ready_fn=lambda: self._processes_launched.set()
+        )
+
+        # Data that needs to be bound to the tests:
+        proc_info = ActiveProcInfoHandler()
+        proc_output = ActiveIoHandler()
+        full_context = dict(test_context, **self._test_run.param_args)
         parsed_launch_arguments = parse_launch_arguments(self._launch_file_arguments)
-        self.test_args = {}
+        test_args = {}
+
         for k, v in parsed_launch_arguments:
-            self.test_args[k] = v
+            test_args[k] = v
+
+        self._test_run.bind(
+            self._test_run.pre_shutdown_tests,
+            injected_attributes={
+                'proc_info': proc_info,
+                'proc_output': proc_output,
+                'test_args': test_args,
+            },
+            injected_args=dict(
+                full_context,
+                # Add a few more things to the args dictionary:
+                **{
+                    'proc_info': proc_info,
+                    'proc_output': proc_output,
+                    'test_args': test_args
+                }
+            )
+        )
+        self._test_run.bind(
+            self._test_run.post_shutdown_tests,
+            injected_attributes={
+                'proc_info': proc_info._proc_info_handler,
+                'proc_output': proc_output._io_handler,
+                'test_args': test_args,
+            },
+            injected_args=dict(
+                full_context,
+                # Add a few more things to the args dictionary:
+                **{
+                    'proc_info': proc_info._proc_info_handler,
+                    'proc_output': proc_output._io_handler,
+                    'test_args': test_args
+                }
+            )
+        )
 
         # Wrap the test_ld in another launch description so we can bind command line arguments to
         # the test and add our own event handlers for process IO and process exit:
@@ -106,12 +124,12 @@ def run(self):
                 launch_arguments=parsed_launch_arguments
             ),
             RegisterEventHandler(
-                OnProcessExit(on_exit=lambda info, unused: self.proc_info.append(info))
+                OnProcessExit(on_exit=lambda info, unused: proc_info.append(info))
             ),
             RegisterEventHandler(
                 OnProcessIO(
-                    on_stdout=self.proc_output.append,
-                    on_stderr=self.proc_output.append,
+                    on_stdout=proc_output.append,
+                    on_stderr=proc_output.append,
                 )
             ),
         ])
@@ -127,39 +145,19 @@ def run(self):
             # LaunchService.run returned before the tests completed.  This can be because the user
             # did ctrl+c, or because all of the launched nodes died before the tests completed
             print('Processes under test stopped before tests completed')
-            self._print_process_output_summary()  # <-- Helpful to debug why processes died early
+            # Give some extra help debugging why processes died early
+            self._print_process_output_summary(proc_info, proc_output)
             # We treat this as a test failure and return some test results indicating such
-            return FailResult(), FailResult()
+            raise _LaunchDiedException()
 
-        # Now, run the post-shutdown tests
-        inactive_suite = PostShutdownTestLoader(
-            injected_attributes={
-                'proc_info': self.proc_info,
-                'proc_output': self.proc_output._io_handler,
-                'test_args': self.test_args,
-            },
-            injected_args=dict(
-                self.test_context,
-                # Add a few more things to the args dictionary:
-                **{
-                    'proc_info': self.proc_info,
-                    'proc_output': self.proc_output._io_handler,
-                    'test_args': self.test_args
-                }
-            )
-        ).loadTestsFromModule(self._test_module)
         inactive_results = unittest.TextTestRunner(
             verbosity=2,
             resultclass=TestResult
-        ).run(inactive_suite)
+        ).run(self._test_run.post_shutdown_tests)
 
-        return self._results, inactive_results
+        self._results.append(inactive_results)
 
-    def validate(self):
-        """Inspect the test configuration for configuration errors."""
-        # Make sure the function signature of the launch configuration
-        # generator is correct
-        inspect.getcallargs(self._gen_launch_description_fn, lambda: None)
+        return self._results
 
     def _run_test(self):
         # Waits for the DUT processes to start (signaled by the _processes_launched
@@ -172,43 +170,101 @@ def _run_test(self):
             return
 
         try:
-            # Load the tests
-            active_suite = PreShutdownTestLoader(
-                injected_attributes={
-                    'proc_info': self.proc_info,
-                    'proc_output': self.proc_output,
-                    'test_args': self.test_args,
-                },
-                injected_args=dict(
-                    self.test_context,
-                    # Add a few more things to the args dictionary:
-                    **{
-                        'proc_info': self.proc_info,
-                        'proc_output': self.proc_output,
-                        'test_args': self.test_args
-                    }
-                )
-            ).loadTestsFromModule(self._test_module)
-
             # Run the tests
             self._results = unittest.TextTestRunner(
                 verbosity=2,
                 resultclass=TestResult
-            ).run(active_suite)
+            ).run(self._test_run.pre_shutdown_tests)
 
         finally:
             self._tests_completed.set()
             self._launch_service.shutdown()
 
-    def _print_process_output_summary(self):
-        failed_procs = [proc for proc in self.proc_info if proc.returncode != 0]
+    def _print_process_output_summary(self, proc_info, proc_output):
+        failed_procs = [proc for proc in proc_info if proc.returncode != 0]
         for process in failed_procs:
             print("Process '{}' exited with {}".format(process.process_name, process.returncode))
             print("##### '{}' output #####".format(process.process_name))
             try:
-                for io in self.proc_output[process.action]:
+                for io in proc_output[process.action]:
                     print('{}'.format(io.text.decode('ascii')))
             except KeyError:
                 pass  # Process generated no output
             print('#' * (len(process.process_name) + 21))
+
+
+class ApexRunner(object):
+
+    def __init__(self,
+                 test_runs,
+                 launch_file_arguments=[],
+                 debug=False):
+        """
+        Create an ApexRunner object.
+
+        :param test_runs: A list of TestRun objects describing the launches to perform
+        and the tests to run with them.  A parametrized generate_test_description
+        yields multiple test runs, and each test run gets its own launch and its
+        own copy of the tests
+        """
+        self._test_runs = test_runs
+        self._launch_file_arguments = launch_file_arguments
+        self._debug = debug
+
+    def run(self):
+        """
+        Launch the processes under test and run the tests.
+
+        :return: A dictionary of unittest.Results keyed by test run - each result combines
+        the tests that ran while processes were active and those that ran after shutdown
+        """
+        # We will return the results as a {test_run: results} dictionary
+        results = {}
+
+        for index, run in enumerate(self._test_runs):
+            if len(self._test_runs) > 1:
+                print('\n***** Starting test run {} *****'.format(run))
+            try:
+                worker = _RunnerWorker(run, self._launch_file_arguments, self._debug)
+                results[run] = worker.run()
+            except _LaunchDiedException:
+                # The most likely cause was ctrl+c, so we'll abort the test run
+                results[run] = FailResult()
+                break
+
+        return results
+
+    def validate(self):
+        """Inspect the test configuration for configuration errors."""
+        # Make sure the function signature of the launch configuration
+        # generator is correct
+        for run in self._test_runs:
+            # Drill down into any parametrized test descriptions and make sure the argument names
+            # are correct.  A simpler check could use getcallargs, but it would not produce a
+            # very helpful message.
+            base_fn = inspect.unwrap(run.test_description_function)
+            base_args = inspect.getfullargspec(base_fn)
+            base_args = base_args.args + base_args.kwonlyargs
+
+            # Check that the parametrized arguments all have a place to go
+            for argname in run.param_args.keys():
+                if argname not in base_args:
+                    raise Exception(
+                        'Could not find an argument in generate_test_description matching '
+                        "parametrized argument '{}'".format(argname)
+                    )
+
+            # Check for extra args in generate_test_description
+            for argname in base_args:
+                if argname == 'ready_fn':
+                    continue
+                if argname not in run.param_args.keys():
+                    raise Exception(
+                        "generate_test_description has unexpected extra argument '{}'".format(
+                            argname
+                        )
+                    )
+
+            # This is a double-check
+            inspect.getcallargs(run.test_description_function, ready_fn=lambda: None)
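The new validate() leans on inspect.unwrap to see through the functools.update_wrapper chain that @parametrize sets up, then checks argument names with getfullargspec. A condensed, standalone sketch of that check; the function and argument names are stand-ins:

import functools
import inspect


def generate_test_description(arg_val, ready_fn):
    pass


partial = functools.partial(generate_test_description, arg_val=1)
functools.update_wrapper(partial, generate_test_description)

base_fn = inspect.unwrap(partial)  # follows __wrapped__ back to the original function
spec = inspect.getfullargspec(base_fn)
assert 'arg_val' in spec.args + spec.kwonlyargs
assert 'ready_fn' in spec.args + spec.kwonlyargs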
diff --git a/apex_launchtest/apex_launchtest/junitxml.py b/apex_launchtest/apex_launchtest/junitxml.py
index f98a851..7216f0f 100644
--- a/apex_launchtest/apex_launchtest/junitxml.py
+++ b/apex_launchtest/apex_launchtest/junitxml.py
@@ -44,7 +44,7 @@ def unittestResultsToXml(*, name='apex_launchtest', test_results={}):
     test_suites.set('errors', str(errors))
 
     for (key, value) in test_results.items():
-        test_suites.append(unittestResultToXml(key, value))
+        test_suites.append(unittestResultToXml(str(key), value))
 
     return ET.ElementTree(test_suites)
 
@@ -81,7 +81,7 @@ class needs to be an apex_launchtest TestResult class
     """
     case_xml = ET.Element('testcase')
-    case_xml.set('name', test_case._testMethodName)
+    case_xml.set('name', type(test_case).__name__ + '.' + test_case._testMethodName)
     case_xml.set('time', str(round(test_result.testTimes[test_case], 3)))
 
     for failure in test_result.failures:
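With TestRun objects as the test_results keys, str(key) is what turns a run into a suite name. A small sketch of serializing such a dictionary, assuming the TextTestResult-style (stream, descriptions, verbosity) constructor exercised by the tests later in this diff:

import xml.etree.ElementTree as ET

from apex_launchtest.junitxml import unittestResultsToXml
from apex_launchtest.test_result import TestResult

tree = unittestResultsToXml(
    name='example',
    test_results={'launch_1': TestResult(None, True, 1)},
)
assert isinstance(tree, ET.ElementTree)
suites = list(tree.getroot())
assert suites[0].attrib['name'] == 'launch_1'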
diff --git a/apex_launchtest/apex_launchtest/loader.py b/apex_launchtest/apex_launchtest/loader.py
index 5f8e914..f8ad3dd 100644
--- a/apex_launchtest/apex_launchtest/loader.py
+++ b/apex_launchtest/apex_launchtest/loader.py
@@ -14,18 +14,121 @@
 
 import functools
 import inspect
+import itertools
 import unittest
 
 
-def PreShutdownTestLoader(injected_attributes={}, injected_args={}):
-    return _make_loader(False, injected_attributes, injected_args)
+def _normalize_ld(launch_description_fn):
+    # A launch description fn can return just a launch description, or a tuple of
+    # (launch_description, test_context).  This wrapper function normalizes things
+    # so we always get a tuple, sometimes with an empty dictionary for the test_context
+    def wrapper(*args, **kwargs):
+        result = launch_description_fn(*args, **kwargs)
+        if isinstance(result, tuple):
+            return result
+        else:
+            return result, {}
+
+    return wrapper
+
+
+class TestRun:
+
+    def __init__(self,
+                 test_description_function,
+                 param_args,
+                 pre_shutdown_tests,
+                 post_shutdown_tests):
+
+        self.test_description_function = test_description_function
+        self.normalized_test_description = _normalize_ld(test_description_function)
+
+        self.param_args = param_args
+
+        self.pre_shutdown_tests = pre_shutdown_tests
+        self.post_shutdown_tests = post_shutdown_tests
+
+        # If we're parametrized, extend the test names so we can tell more easily what
+        # params they were run with
+        if self.param_args:
+            for tc in itertools.chain(_iterate_tests_in_test_suite(pre_shutdown_tests),
+                                      _iterate_tests_in_test_suite(post_shutdown_tests)):
+                test_method = getattr(tc, tc._testMethodName)
+                new_name = tc._testMethodName + self._format_params()
+                setattr(tc, '_testMethodName', new_name)
+                setattr(tc, new_name, test_method)
+
+    def bind(self, tests, injected_attributes={}, injected_args={}):
+        """
+        Bind injected_attributes and injected_args to tests.
+
+        Injected attributes can be accessed from a test as self.name
+        Injected arguments can be accessed as an argument if the test has an argument with a
+        matching name
+        """
+        # Inject test attributes into the test as self.whatever.  This method of giving
+        # objects to the test is pretty inferior to injecting them as arguments to the
+        # test methods - we may deprecate this in favor of everything being an argument
+        for name, value in injected_attributes.items():
+            _give_attribute_to_tests(value, name, tests)
+
+        # Give objects with matching names as arguments to tests.  This doesn't have the
+        # weird scoping and name collision issues that the above method has.  In fact,
+        # we give proc_info and proc_output to the tests as arguments too, so anything
+        # you can do with test attributes can also be accomplished with test arguments
+        _bind_test_args_to_tests(injected_args, tests)
+
+    def get_launch_description(self):
+        """
+        Get just the launch description portion of the test_description.
+
+        This should only be used for the purposes of introspecting the launch description.  The
+        returned launch description is not meant to be launched
+        """
+        return self.normalized_test_description(ready_fn=lambda: None)[0]
+
+    def __str__(self):
+        """
+        Get the human-readable name of a test run.
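A condensed sketch of what loading now returns, mirroring the fake-module pattern the new tests in this diff use (the module and argument names are illustrative, and the apex_launchtest package is assumed to be importable):

import types

import apex_launchtest
from apex_launchtest.loader import LoadTestsFromPythonModule


@apex_launchtest.parametrize('arg_1', [1, 2])
def generate_test_description(ready_fn, arg_1):
    pass  # pragma: no cover


module = types.ModuleType('fake_module')
module.generate_test_description = generate_test_description

test_runs = LoadTestsFromPythonModule(module)
assert len(test_runs) == 2  # one TestRun per parameter value
assert test_runs[0].param_args == {'arg_1': 1}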
+
+        We'll use the module name (set to the file name in apex_launchtest_main when we loaded it)
+        plus some extra disambiguating info for parametrized tests
+        """
+        return self.test_description_function.__module__ + self._format_params()
+
+    def _format_params(self):
+        if not self.param_args:
+            return ''
+        else:
+            str_args = map(str, self.param_args.values())
+            return '[{}]'.format(', '.join(str_args))
+
+
+def LoadTestsFromPythonModule(module):
+
+    if hasattr(module.generate_test_description, '__parametrized__'):
+        test_descriptions = module.generate_test_description
+    else:
+        test_descriptions = [(module.generate_test_description, {})]
+
+    # If our test description is parametrized, we'll load a set of tests for each
+    # individual launch
+    return [TestRun(description,
+                    args,
+                    PreShutdownTestLoader().loadTestsFromModule(module),
+                    PostShutdownTestLoader().loadTestsFromModule(module))
+            for description, args in test_descriptions]
+
+
+def PreShutdownTestLoader():
+    return _make_loader(False)
 
 
-def PostShutdownTestLoader(injected_attributes={}, injected_args={}):
-    return _make_loader(True, injected_attributes, injected_args)
+def PostShutdownTestLoader():
+    return _make_loader(True)
 
 
-def _make_loader(load_post_shutdown, injected_attributes, injected_args):
+def _make_loader(load_post_shutdown):
 
     class _loader(unittest.TestLoader):
         """TestLoader selectively loads pre-shutdown or post-shutdown tests."""
@@ -34,19 +137,6 @@
         def loadTestsFromTestCase(self, testCaseClass):
             if getattr(testCaseClass, '__post_shutdown_test__', False) == load_post_shutdown:
                 cases = super(_loader, self).loadTestsFromTestCase(testCaseClass)
-
-                # Inject test attributes into the test as self.whatever.  This method of giving
-                # objects to the test is pretty inferior to injecting them as arguments to the
-                # test methods - we may deprecate this in favor of everything being an argument
-                for name, value in injected_attributes.items():
-                    _give_attribute_to_tests(value, name, cases)
-
-                # Give objects with matching names as arguments to tests.  This doesn't have the
-                # weird scoping and name collision issues that the above method has.  In fact,
-                # we give proc_info and proc_output to the tests as arguments too, so anything
-                # you can do with test attributes can also be accomplished with test arguments
-                _bind_test_args_to_tests(injected_args, cases)
-
                 return cases
             else:
                 # Empty test suites will be ignored by the test runner
diff --git a/apex_launchtest/apex_launchtest/parametrize.py b/apex_launchtest/apex_launchtest/parametrize.py
new file mode 100644
index 0000000..ec4b2e1
--- /dev/null
+++ b/apex_launchtest/apex_launchtest/parametrize.py
@@ -0,0 +1,56 @@
+# Copyright 2019 Apex.AI, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+
+
+def _normalize_to_tuple(val):
+    if isinstance(val, tuple):
+        return val
+    else:
+        return (val,)
+
+
+def parametrize(argnames, argvalues):
+    """
+    Decorate a test launch description in a way that causes it to run with specified parameters.
+
+    This decorator behaves like the @pytest.mark.parametrize decorator.
+
+    :param argnames: A comma-separated list of argument names.
+
+    :param argvalues: The values to use for arguments specified in argnames
+    """
+    argnames = [x.strip() for x in argnames.split(',') if x.strip()]
+    argvalues = [_normalize_to_tuple(x) for x in argvalues]
+
+    class decorator:
+
+        def __init__(self, func):
+            setattr(self, '__parametrized__', True)
+            self.__calls = []
+
+            for val in argvalues:
+                partial_args = dict(zip(argnames, val))
+
+                partial = functools.partial(func, **partial_args)
+                functools.update_wrapper(partial, func)
+                self.__calls.append(
+                    (partial, partial_args)
+                )
+
+        def __iter__(self):
+            return iter(self.__calls)
+
+    return decorator
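The decorated object is itself iterable: it yields (partial, params) pairs, which LoadTestsFromPythonModule consumes. Demonstrated standalone, with illustrative argument names:

import apex_launchtest


@apex_launchtest.parametrize('arg_1, arg_2', [(5, 10), (15, 20)])
def fake_test_description(arg_1, arg_2):
    return arg_1 + arg_2


# Each entry pre-binds one row of argvalues onto the original function
assert [func() for func, params in fake_test_description] == [15, 35]
assert [params for func, params in fake_test_description] == [
    {'arg_1': 5, 'arg_2': 10},
    {'arg_1': 15, 'arg_2': 20},
]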
diff --git a/apex_launchtest/apex_launchtest/test_result.py b/apex_launchtest/apex_launchtest/test_result.py
index f2af0c6..98d6a9e 100644
--- a/apex_launchtest/apex_launchtest/test_result.py
+++ b/apex_launchtest/apex_launchtest/test_result.py
@@ -64,3 +64,13 @@ def startTest(self, test):
     def stopTest(self, test):
         self.__test_cases[test]['end'] = time.time()
         super().stopTest(test)
+
+    def append(self, results):
+        self.__test_cases.update(results.__test_cases)
+
+        self.failures += results.failures
+        self.errors += results.errors
+        self.testsRun += results.testsRun
+        self.skipped += results.skipped
+        self.expectedFailures += results.expectedFailures
+        self.unexpectedSuccesses += results.unexpectedSuccesses
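TestResult.append is what lets _RunnerWorker fold the post-shutdown results into the pre-shutdown ones before handing a single object back per run. A toy sketch, using the (stream, descriptions, verbosity) construction seen in this diff's tests:

from apex_launchtest.test_result import TestResult

active = TestResult(None, True, 1)
post_shutdown = TestResult(None, True, 1)

active.append(post_shutdown)
assert active.testsRun == 0  # nothing actually ran in this toy example
assert active.wasSuccessful()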
diff --git a/apex_launchtest/examples/parameters.test.py b/apex_launchtest/examples/parameters.test.py
new file mode 100644
index 0000000..0d98426
--- /dev/null
+++ b/apex_launchtest/examples/parameters.test.py
@@ -0,0 +1,67 @@
+# Copyright 2019 Apex.AI, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import unittest
+
+import ament_index_python
+import apex_launchtest
+import apex_launchtest.asserts
+import apex_launchtest.util
+import launch
+import launch.actions
+
+
+@apex_launchtest.parametrize('arg_param', ['thing=On', 'thing=Off', 'flag1'])
+def generate_test_description(arg_param, ready_fn):
+
+    terminating_process = launch.actions.ExecuteProcess(
+        cmd=[
+            os.path.join(
+                ament_index_python.get_package_prefix('apex_launchtest'),
+                'lib/apex_launchtest',
+                'terminating_proc',
+            ),
+            # Use the parameter passed to generate_test_description as an argument
+            # to the terminating_proc
+            '--{}'.format(arg_param),
+        ]
+    )
+
+    return (
+        launch.LaunchDescription([
+            terminating_process,
+            apex_launchtest.util.KeepAliveProc(),
+            launch.actions.OpaqueFunction(function=lambda context: ready_fn())
+        ]),
+        {'dut_process': terminating_process}
+    )
+
+
+class TestProcessOutput(unittest.TestCase):
+
+    # Note that 'arg_param' is automatically given to the test case, even though it was not
+    # part of the test context.
+    def test_process_outputs_expected_value(self, proc_output, arg_param):
+        proc_output.assertWaitFor('--' + arg_param, timeout=10)
+
+
+@apex_launchtest.post_shutdown_test()
+class TestOutputAfterShutdown(unittest.TestCase):
+
+    def test_process_output_expected_value(self, proc_output, arg_param, dut_process):
+        with apex_launchtest.asserts.assertSequentialStdout(proc_output, dut_process) as cm:
+            cm.assertInStdout('Starting Up')
+            cm.assertInStdout('--' + arg_param)
+            cm.assertInStdout('Shutting Down')
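How 'arg_param' reaches these test methods: _RunnerWorker merges the context dictionary returned by generate_test_description with the run's param_args, so 'dut_process' and 'arg_param' are both injectable as test arguments. The merge itself, in miniature (values illustrative):

test_context = {'dut_process': object()}  # returned by generate_test_description
param_args = {'arg_param': 'thing=On'}    # bound by @parametrize
full_context = dict(test_context, **param_args)
assert set(full_context) == {'dut_process', 'arg_param'}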
diff --git a/apex_launchtest/test/test_apex_runner_validation.py b/apex_launchtest/test/test_apex_runner_validation.py
index 998464c..b9d14d7 100644
--- a/apex_launchtest/test/test_apex_runner_validation.py
+++ b/apex_launchtest/test/test_apex_runner_validation.py
@@ -12,9 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import imp
 import unittest
 
+import apex_launchtest
 from apex_launchtest.apex_runner import ApexRunner
+from apex_launchtest.loader import LoadTestsFromPythonModule
+
+
+def make_test_run_for_dut(generate_test_description_function):
+    module = imp.new_module('test_module')
+    module.generate_test_description = generate_test_description_function
+    return LoadTestsFromPythonModule(module)
 
 
 class TestApexRunnerValidation(unittest.TestCase):
@@ -22,16 +31,41 @@ class TestApexRunnerValidation(unittest.TestCase):
 
     def test_catches_bad_signature(self):
         dut = ApexRunner(
-            gen_launch_description_fn=lambda: None,
-            test_module=None
+            make_test_run_for_dut(
+                lambda: None
+            )
         )
 
         with self.assertRaises(TypeError):
             dut.validate()
 
         dut = ApexRunner(
-            gen_launch_description_fn=lambda fn: None,
-            test_module=None
+            make_test_run_for_dut(
+                lambda ready_fn: None
+            )
         )
 
         dut.validate()
+
+    def test_too_many_arguments(self):
+
+        dut = ApexRunner(
+            make_test_run_for_dut(lambda ready_fn, extra_arg: None)
+        )
+
+        with self.assertRaisesRegex(Exception, "unexpected extra argument 'extra_arg'"):
+            dut.validate()
+
+    def test_bad_parametrization_argument(self):
+
+        @apex_launchtest.parametrize('bad_argument', [1, 2, 3])
+        def bad_launch_description(ready_fn):
+            pass  # pragma: no cover
+
+        dut = ApexRunner(
+            make_test_run_for_dut(bad_launch_description)
+        )
+
+        with self.assertRaisesRegex(Exception, 'Could not find an argument') as cm:
+            dut.validate()
+        self.assertIn('bad_argument', str(cm.exception))
diff --git a/apex_launchtest/test/test_parametrize_decorator.py b/apex_launchtest/test/test_parametrize_decorator.py
new file mode 100644
index 0000000..a5a14df
--- /dev/null
+++ b/apex_launchtest/test/test_parametrize_decorator.py
@@ -0,0 +1,80 @@
+# Copyright 2019 Apex.AI, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import apex_launchtest
+
+
+def test_parametrize_attribute():
+
+    @apex_launchtest.parametrize('val', [1, 2, 3])
+    def fake_test_description(arg):
+        pass  # pragma: no cover
+
+    assert hasattr(fake_test_description, '__parametrized__')
+
+
+def test_binding_arguments():
+
+    results = []
+
+    @apex_launchtest.parametrize('val', [1, 2, 3])
+    def fake_test_description(val):
+        results.append(val)
+
+    for func, params in fake_test_description:
+        func()
+
+    assert results == [1, 2, 3]
+
+
+def test_binding_one_tuples():
+
+    results = []
+
+    @apex_launchtest.parametrize('val', [(1,), (2,), (3,)])
+    def fake_test_description(val):
+        results.append(val)
+
+    for func, params in fake_test_description:
+        func()
+
+    assert results == [1, 2, 3]
+
+
+def test_partial_binding():
+
+    results = []
+
+    @apex_launchtest.parametrize('val', ['x', 'y', 'z'])
+    def fake_test_description(val, arg):
+        results.append((val, arg))
+
+    for index, (func, params) in enumerate(fake_test_description):
+        func(arg=index)
+
+    assert results == [('x', 0), ('y', 1), ('z', 2)]
+
+
+def test_multiple_args():
+
+    results = []
+
+    @apex_launchtest.parametrize('arg_1, arg_2', [(5, 10), (15, 20), (25, 30)])
+    def fake_test_description(arg_1, arg_2):
+        results.append((arg_1, arg_2))
+
+    for index, (func, params) in enumerate(fake_test_description):
+        func()
+
+    assert results == [(5, 10), (15, 20), (25, 30)]
diff --git a/apex_launchtest/test/test_parametrized_description_import.py b/apex_launchtest/test/test_parametrized_description_import.py
new file mode 100644
index 0000000..e2ad5bf
--- /dev/null
+++ b/apex_launchtest/test/test_parametrized_description_import.py
@@ -0,0 +1,67 @@
+# Copyright 2019 Apex.AI, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import types
+import unittest
+
+import apex_launchtest
+from apex_launchtest.loader import LoadTestsFromPythonModule
+
+
+class TestModuleImport(unittest.TestCase):
+
+    def setUp(self):
+        class FakePreShutdownTests(unittest.TestCase):
+
+            def test_1(self):
+                pass  # pragma: no cover
+
+            def test_2(self):
+                pass  # pragma: no cover
+
+        @apex_launchtest.post_shutdown_test()
+        class FakePostShutdownTests(unittest.TestCase):
+
+            def test_3(self):
+                pass  # pragma: no cover
+
+            def test_4(self):
+                pass  # pragma: no cover
+
+        self.test_module = types.ModuleType('test_module')
+        self.test_module.FakePreShutdownTests = FakePreShutdownTests
+        self.test_module.FakePostShutdownTests = FakePostShutdownTests
+
+    def test_non_parametrized_test_description(self):
+
+        def generate_test_description(ready_func):
+            pass  # pragma: no cover
+
+        self.test_module.generate_test_description = generate_test_description
+
+        test_runs = LoadTestsFromPythonModule(self.test_module)
+
+        assert len(test_runs) == 1
+
+    def test_parametrized_test_description(self):
+
+        @apex_launchtest.parametrize('arg_1', [1, 2, 3, 4, 5])
+        def generate_test_description(ready_func, arg_1):
+            pass  # pragma: no cover
+
+        self.test_module.generate_test_description = generate_test_description
+
+        test_runs = LoadTestsFromPythonModule(self.test_module)
+
+        assert len(test_runs) == 5
diff --git a/apex_launchtest/test/test_runner_results.py b/apex_launchtest/test/test_runner_results.py
index c57ec8c..c8577cc 100644
--- a/apex_launchtest/test/test_runner_results.py
+++ b/apex_launchtest/test/test_runner_results.py
@@ -14,9 +14,15 @@
 
 import imp
 import os
+import types
+import unittest
 
 import ament_index_python
+import apex_launchtest
 from apex_launchtest.apex_runner import ApexRunner
+from apex_launchtest.loader import LoadTestsFromPythonModule
+from apex_launchtest.loader import TestRun as TR
+
 import launch
 import launch.actions
@@ -42,16 +48,15 @@ def generate_test_description(ready_fn):
         launch.actions.OpaqueFunction(function=lambda context: ready_fn()),
     ])
 
-    with mock.patch('apex_launchtest.apex_runner.ApexRunner._run_test'):
+    with mock.patch('apex_launchtest.apex_runner._RunnerWorker._run_test'):
         runner = ApexRunner(
-            gen_launch_description_fn=generate_test_description,
-            test_module=None
+            [TR(generate_test_description, {}, [], [])]
         )
 
-        pre_result, post_result = runner.run()
+        results = runner.run()
 
-        assert not pre_result.wasSuccessful()
-        assert not post_result.wasSuccessful()
+        for result in results.values():
+            assert not result.wasSuccessful()
 
     # This is the negative version of the test below.  If no exit code, no extra output
     # is generated
@@ -90,16 +95,15 @@ def generate_test_description(ready_fn):
         launch.actions.OpaqueFunction(function=lambda context: ready_fn()),
     ])
 
-    with mock.patch('apex_launchtest.apex_runner.ApexRunner._run_test'):
+    with mock.patch('apex_launchtest.apex_runner._RunnerWorker._run_test'):
         runner = ApexRunner(
-            gen_launch_description_fn=generate_test_description,
-            test_module=None
+            [TR(generate_test_description, {}, [], [])]
        )
 
-        pre_result, post_result = runner.run()
+        results = runner.run()
 
-        assert not pre_result.wasSuccessful()
-        assert not post_result.wasSuccessful()
+        for result in results.values():
+            assert not result.wasSuccessful()
 
     # Make sure some information about WHY the process died shows up in the output
     out, err = capsys.readouterr()
@@ -145,13 +149,68 @@ def generate_test_description(ready_fn):
         launch.actions.OpaqueFunction(function=lambda context: ready_fn()),
     ])
 
+    module.generate_test_description = generate_test_description
+
+    runner = ApexRunner(
+        LoadTestsFromPythonModule(module)
+    )
+
+    results = runner.run()
+
+    for result in results.values():
+        assert result.wasSuccessful()
+
+
+def test_parametrized_run_with_one_failure():
+
+    # Test Data
+    @apex_launchtest.parametrize('arg_val', [1, 2, 3, 4, 5])
+    def generate_test_description(arg_val, ready_fn):
+        TEST_PROC_PATH = os.path.join(
+            ament_index_python.get_package_prefix('apex_launchtest'),
+            'lib/apex_launchtest',
+            'good_proc'
+        )
+
+        # This is necessary to get unbuffered output from the process under test
+        proc_env = os.environ.copy()
+        proc_env['PYTHONUNBUFFERED'] = '1'
+
+        return launch.LaunchDescription([
+            launch.actions.ExecuteProcess(
+                cmd=[TEST_PROC_PATH],
+                env=proc_env,
+            ),
+            launch.actions.OpaqueFunction(function=lambda context: ready_fn())
+        ])
+
+    class FakePreShutdownTests(unittest.TestCase):
+
+        def test_fail_on_two(self, proc_output, arg_val):
+            proc_output.assertWaitFor('Starting Up')
+            assert arg_val != 2
+
+    @apex_launchtest.post_shutdown_test()
+    class FakePostShutdownTests(unittest.TestCase):
+
+        def test_fail_on_three(self, arg_val):
+            assert arg_val != 3
+
+    # Set up a fake module containing the test data:
+    test_module = types.ModuleType('test_module')
+    test_module.generate_test_description = generate_test_description
+    test_module.FakePreShutdownTests = FakePreShutdownTests
+    test_module.FakePostShutdownTests = FakePostShutdownTests
+
+    # Run the test:
     runner = ApexRunner(
-        gen_launch_description_fn=generate_test_description,
-        test_module=module
+        LoadTestsFromPythonModule(test_module)
     )
 
-    pre_result, post_result = runner.run()
+    results = runner.run()
 
-    assert pre_result.wasSuccessful()
+    passes = [result for result in results.values() if result.wasSuccessful()]
+    fails = [result for result in results.values() if not result.wasSuccessful()]
 
-    assert pre_result.wasSuccessful()
+    assert len(passes) == 3  # 1, 4, and 5 should pass
+    assert len(fails) == 2  # 2 fails in an active test, 3 fails in a post-shutdown test
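The TR alias above builds TestRun objects by hand instead of going through LoadTestsFromPythonModule; with empty test lists the object is still well-formed, which is all these mocked runs need. A minimal sketch of the same shortcut:

from apex_launchtest.loader import TestRun as TR


def generate_test_description(ready_fn):
    pass  # pragma: no cover


run = TR(generate_test_description, {}, [], [])
assert run.param_args == {}
assert str(run) == generate_test_description.__module__  # no parameter suffix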
diff --git a/apex_launchtest/test/test_xml_output.py b/apex_launchtest/test/test_xml_output.py
index bcde76b..c63a54b 100644
--- a/apex_launchtest/test/test_xml_output.py
+++ b/apex_launchtest/test/test_xml_output.py
@@ -21,6 +21,7 @@
 import ament_index_python
 from apex_launchtest.junitxml import unittestResultsToXml
 from apex_launchtest.test_result import FailResult
+from apex_launchtest.test_result import TestResult as TR
 
 
 class TestGoodXmlOutput(unittest.TestCase):
@@ -55,11 +56,17 @@ def test_pre_and_post(self):
         tree = ET.parse(self.xml_file)
         root = tree.getroot()
 
-        self.assertEqual(len(root.getchildren()), 2)
+        self.assertEqual(len(root.getchildren()), 1)
+        test_suite = root.getchildren()[0]
 
-        # Expecting an element called 'active_tests' and 'after_shutdown_tests'
-        child_names = [chld.attrib['name'] for chld in root.getchildren()]
-        self.assertEqual(set(child_names), {'active_tests', 'after_shutdown_tests'})
+        # Expecting an element called 'good_proc.test.py' since this was not parametrized
+        self.assertEqual(test_suite.attrib['name'], 'good_proc.test.py')
+
+        # Drilling down a little further, we expect the class names to show up in the testcase
+        # names
+        case_names = [case.attrib['name'] for case in test_suite.getchildren()]
+        self.assertIn('TestGoodProcess.test_count_to_four', case_names)
+        self.assertIn('TestProcessOutput.test_full_output', case_names)
 
 
 class TestXmlFunctions(unittest.TestCase):
@@ -76,3 +83,16 @@ def test_fail_results_serialize(self):
         # Simple sanity check - see that there's a child element called active_tests
         child_names = [chld.attrib['name'] for chld in xml_tree.getroot().getchildren()]
         self.assertEqual(set(child_names), {'active_tests'})
+
+    def test_multiple_test_results(self):
+        xml_tree = unittestResultsToXml(
+            name='multiple_launches',
+            test_results={
+                'launch_1': TR(None, True, 1),
+                'launch_2': TR(None, True, 1),
+                'launch_3': TR(None, True, 1),
+            }
+        )
+
+        child_names = [chld.attrib['name'] for chld in xml_tree.getroot().getchildren()]
+        self.assertEqual(set(child_names), {'launch_1', 'launch_2', 'launch_3'})
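A closing note on how parametrized runs show up in this XML: TestRun's constructor renames each test method by appending the formatted parameters, so a JUnit case from a parametrized run combines both naming changes. A sketch of the expected shape, with illustrative values:

case_name = 'TestProcessOutput' + '.' + 'test_process_outputs_expected_value' + '[thing=On]'
assert case_name == 'TestProcessOutput.test_process_outputs_expected_value[thing=On]'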