This repository has been archived by the owner on Jan 27, 2020. It is now read-only.

Add parametrized example, finish refactor
  - Combine pre and post results into one set of results, add class names so the tests are distinguishable
  - Update tests to match new 'run' API that returns a dictionary of {test_run: result} instead of a tuple of (pre_shutdown, post_shutdown) results
Pete Baughman committed Apr 8, 2019
1 parent 21f8a07 commit 7e2da27
Showing 11 changed files with 138 additions and 47 deletions.
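For reference, a minimal standalone sketch of the new 'run' API shape described in the commit message, using a plain dict and unittest stand-ins rather than the real TestRun and TestResult objects that apex_launchtest returns:

import sys
import unittest

# runner.run() used to return (pre_shutdown_result, post_shutdown_result);
# it now returns one combined result per test run, keyed by the run itself.
results = {'launch': unittest.TestResult()}   # real keys are TestRun objects

for result in results.values():              # exit non-zero if any run failed
    if not result.wasSuccessful():
        sys.exit(1)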
18 changes: 6 additions & 12 deletions apex_launchtest/apex_launchtest/apex_launchtest_main.py
@@ -126,23 +126,17 @@ def apex_launchtest_main():
 
     _logger_.debug('Running integration test')
     try:
-        result, postcheck_result = runner.run()
+        results = runner.run()
         _logger_.debug('Done running integration test')
 
         if args.xmlpath:
-            xml_report = unittestResultsToXml(
-                test_results={
-                    'active_tests': result,
-                    'after_shutdown_tests': postcheck_result
-                }
-            )
+            xml_report = unittestResultsToXml(test_results=results)
             xml_report.write(args.xmlpath, xml_declaration=True)
 
-        if not result.wasSuccessful():
-            sys.exit(1)
-
-        if not postcheck_result.wasSuccessful():
-            sys.exit(1)
+        # There will be one result for every test run (see above where we load the tests)
+        for result in results.values():
+            if not result.wasSuccessful():
+                sys.exit(1)
 
     except Exception as e:
         import traceback
25 changes: 16 additions & 9 deletions apex_launchtest/apex_launchtest/apex_runner.py
@@ -64,16 +64,17 @@ def run(self):
         active, and another set for tests that ran after processes were shutdown
         """
         test_ld, test_context = self._test_run.normalized_test_description(
-            lambda: self._processes_launched.set()
+            ready_fn=lambda: self._processes_launched.set()
         )
 
         # Data that needs to be bound to the tests:
         proc_info = ActiveProcInfoHandler()
         proc_output = ActiveIoHandler()
-        test_context = test_context
+        full_context = dict(test_context, **self._test_run.param_args)
+        # TODO pete: this can be simplified as a call to the dict ctor:
         parsed_launch_arguments = parse_launch_arguments(self._launch_file_arguments)
         test_args = {}
 
         for k, v in parsed_launch_arguments:
             test_args[k] = v
 
@@ -85,7 +86,7 @@ def run(self):
                 'test_args': test_args,
             },
             injected_args=dict(
-                test_context,
+                full_context,
                 # Add a few more things to the args dictionary:
                 **{
                     'proc_info': proc_info,
@@ -102,7 +103,7 @@ def run(self):
                 'test_args': test_args,
             },
             injected_args=dict(
-                test_context,
+                full_context,
                 # Add a few more things to the args dictionary:
                 **{
                     'proc_info': proc_info._proc_info_handler,
@@ -144,14 +145,16 @@ def run(self):
             # Give some extra help debugging why processes died early
             self._print_process_output_summary(proc_info, proc_output)
             # We treat this as a test failure and return some test results indicating such
-            return FailResult(), FailResult()
+            return FailResult()
 
         inactive_results = unittest.TextTestRunner(
             verbosity=2,
             resultclass=TestResult
         ).run(self._test_run.post_shutdown_tests)
 
-        return self._results, inactive_results
+        self._results.append(inactive_results)
+
+        return self._results
 
     def _run_test(self):
         # Waits for the DUT processes to start (signaled by the _processes_launched
@@ -213,14 +216,18 @@ def run(self):
         :return: A tuple of two unittest.Results - one for tests that ran while processes were
         active, and another set for tests that ran after processes were shutdown
         """
+        # We will return the results as a {test_run: (active_results, post_shutdown_results)}
+        results = {}
+
         for run in self._test_runs:
             worker = _RunnerWorker(run, self._launch_file_arguments, self._debug)
-            # TODO pete: Make this work for parameterized launches by combining the results
-            return worker.run()
+            results[run] = worker.run()
+
+        return results
 
     def validate(self):
         """Inspect the test configuration for configuration errors."""
         # Make sure the function signature of the launch configuration
         # generator is correct
         for run in self._test_runs:
-            inspect.getcallargs(run.test_description_function, lambda: None)
+            inspect.getcallargs(run.test_description_function, ready_fn=lambda: None)
4 changes: 2 additions & 2 deletions apex_launchtest/apex_launchtest/junitxml.py
@@ -44,7 +44,7 @@ def unittestResultsToXml(*, name='apex_launchtest', test_results={}):
     test_suites.set('errors', str(errors))
 
     for (key, value) in test_results.items():
-        test_suites.append(unittestResultToXml(key, value))
+        test_suites.append(unittestResultToXml(str(key), value))
 
     return ET.ElementTree(test_suites)
 
@@ -81,7 +81,7 @@ def unittestCaseToXml(test_result, test_case):
     class needs to be an apex_launchtest TestResult class
     """
     case_xml = ET.Element('testcase')
-    case_xml.set('name', test_case._testMethodName)
+    case_xml.set('name', type(test_case).__name__ + '.' + test_case._testMethodName)
     case_xml.set('time', str(round(test_result.testTimes[test_case], 3)))
 
     for failure in test_result.failures:
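A rough sketch of the resulting naming scheme, using only the standard library rather than the apex_launchtest helpers (element tags approximated here): the test suite takes str(test_run) as its name, 'launch' for a non-parametrized file, and each testcase name is now prefixed with its class:

import xml.etree.ElementTree as ET

suites = ET.Element('testsuites')
suite = ET.SubElement(suites, 'testsuite', name='launch')
ET.SubElement(suite, 'testcase',
              name='TestGoodProcess.test_count_to_four', time='0.001')
print(ET.tostring(suites).decode())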
21 changes: 18 additions & 3 deletions apex_launchtest/apex_launchtest/loader.py
@@ -33,9 +33,17 @@ def wrapper(*args, **kwargs):
 
 class TestRun:
 
-    def __init__(self, test_description_function, pre_shutdown_tests, post_shutdown_tests):
+    def __init__(self,
+                 test_description_function,
+                 param_args,
+                 pre_shutdown_tests,
+                 post_shutdown_tests):
+
         self.test_description_function = test_description_function
         self.normalized_test_description = _normalize_ld(test_description_function)
+
+        self.param_args = param_args
+
         self.pre_shutdown_tests = pre_shutdown_tests
         self.post_shutdown_tests = post_shutdown_tests
 
@@ -68,20 +76,27 @@ def get_launch_description(self):
         """
         return self.test_description_function(lambda: None)
 
+    def __str__(self):
+        if not self.param_args:
+            return 'launch'
+        else:
+            return 'TODO Parametrize'
+
 
 def LoadTestsFromPythonModule(module):
 
     if hasattr(module.generate_test_description, '__parametrized__'):
         normalized_test_description_func = module.generate_test_description
     else:
-        normalized_test_description_func = [module.generate_test_description]
+        normalized_test_description_func = [(module.generate_test_description, {})]
 
     # If our test description is parameterized, we'll load a set of tests for each
     # individual launch
     return [TestRun(description,
+                    args,
                     PreShutdownTestLoader().loadTestsFromModule(module),
                     PostShutdownTestLoader().loadTestsFromModule(module))
-            for description in normalized_test_description_func]
+            for description, args in normalized_test_description_func]
 
 
 def PreShutdownTestLoader():
4 changes: 3 additions & 1 deletion apex_launchtest/apex_launchtest/parametrize.py
@@ -44,8 +44,10 @@ def __init__(self, func):
         for val in argvalues:
             partial_args = dict(zip(argnames, val))
 
+            partial = functools.partial(func, **partial_args)
+            functools.update_wrapper(partial, func)
             self.__calls.append(
-                functools.partial(func, **partial_args)
+                (partial, partial_args)
             )
 
     def __iter__(self):
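A standalone sketch of the binding pattern used above (the describe function is hypothetical, not part of apex_launchtest): each parameterization is bound with functools.partial, keeps the wrapped function's metadata via update_wrapper, and is stored together with its argument dict so callers can see which values were baked in:

import functools


def describe(arg_param, ready_fn):
    return 'launching with --{}'.format(arg_param)


calls = []
for val in ['thing=On', 'thing=Off', 'flag1']:
    partial_args = {'arg_param': val}
    partial = functools.partial(describe, **partial_args)
    functools.update_wrapper(partial, describe)
    calls.append((partial, partial_args))

for func, params in calls:
    # Each entry knows both what to call and which arguments were bound
    print(params, func(ready_fn=lambda: None))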
10 changes: 10 additions & 0 deletions apex_launchtest/apex_launchtest/test_result.py
@@ -64,3 +64,13 @@ def startTest(self, test):
     def stopTest(self, test):
         self.__test_cases[test]['end'] = time.time()
         super().stopTest(test)
+
+    def append(self, results):
+        self.__test_cases.update(results.__test_cases)
+
+        self.failures += results.failures
+        self.errors += results.errors
+        self.testsRun += results.testsRun
+        self.skipped += results.skipped
+        self.expectedFailures += results.expectedFailures
+        self.unexpectedSuccesses += results.unexpectedSuccesses
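A standalone sketch of the bookkeeping merge that append() performs, using plain unittest.TestResult objects (the real method also merges the private per-test timing dict, which plain TestResult does not have):

import unittest


class _Passes(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)


class _Fails(unittest.TestCase):
    def test_bad(self):
        self.fail('expected failure for the demo')


loader = unittest.TestLoader()
first, second = unittest.TestResult(), unittest.TestResult()
loader.loadTestsFromTestCase(_Passes).run(first)
loader.loadTestsFromTestCase(_Fails).run(second)

# Fold 'second' into 'first', mirroring the counter updates above
first.failures += second.failures
first.errors += second.errors
first.testsRun += second.testsRun
first.skipped += second.skipped
first.expectedFailures += second.expectedFailures
first.unexpectedSuccesses += second.unexpectedSuccesses

print(first.testsRun, len(first.failures))  # 2 1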
56 changes: 56 additions & 0 deletions apex_launchtest/examples/parameters.test.py
@@ -0,0 +1,56 @@
# Copyright 2019 Apex.AI, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import ament_index_python
import apex_launchtest
import apex_launchtest.util
import launch
import launch.actions


@apex_launchtest.parametrize('arg_param', ['thing=On', 'thing=Off', 'flag1'])
def generate_test_description(arg_param, ready_fn):

    terminating_process = launch.actions.ExecuteProcess(
        cmd=[
            os.path.join(
                ament_index_python.get_package_prefix('apex_launchtest'),
                'lib/apex_launchtest',
                'terminating_proc',
            ),
            # Use the parameter passed to generate_test_description as an argument
            # to the terminating_proc
            '--{}'.format(arg_param),
        ]
    )

    return (
        launch.LaunchDescription([
            terminating_process,
            apex_launchtest.util.KeepAliveProc(),
            launch.actions.OpaqueFunction(function=lambda context: ready_fn())
        ]),
        {'dut_process': terminating_process}
    )


class TestProcessOutput(unittest.TestCase):

    # Note that 'arg_param' is automatically given to the test case, even though it was not
    # part of the test context.
    def test_process_outputs_expectd_value(self, proc_output, arg_param):
        proc_output.assertWaitFor('--' + arg_param, timeout=10)
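Run in the context of the example module above, the decorator makes generate_test_description iterable over (bound function, argument dict) pairs, which is how the loader turns this one file into three separate test runs; a sketch of what that iteration yields:

for func, params in generate_test_description:
    print(params)
# {'arg_param': 'thing=On'}
# {'arg_param': 'thing=Off'}
# {'arg_param': 'flag1'}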
2 changes: 2 additions & 0 deletions apex_launchtest/test/test_apex_runner_validation.py
@@ -25,6 +25,7 @@ def test_catches_bad_signature(self):
         dut = ApexRunner(
             [TR(
                 test_description_function=lambda: None,
+                param_args={},
                 pre_shutdown_tests=None,
                 post_shutdown_tests=None,
             )]
@@ -36,6 +37,7 @@ def test_catches_bad_signature(self):
         dut = ApexRunner(
             [TR(
                 test_description_function=lambda ready_fn: None,
+                param_args={},
                 pre_shutdown_tests=None,
                 post_shutdown_tests=None,
             )]
8 changes: 4 additions & 4 deletions apex_launchtest/test/test_parametrize_decorator.py
@@ -32,7 +32,7 @@ def test_binding_arguments():
     def fake_test_description(val):
         results.append(val)
 
-    for func in fake_test_description:
+    for func, params in fake_test_description:
         func()
 
     assert results == [1, 2, 3]
@@ -46,7 +46,7 @@ def test_binding_one_tuples():
     def fake_test_description(val):
         results.append(val)
 
-    for func in fake_test_description:
+    for func, params in fake_test_description:
         func()
 
     assert results == [1, 2, 3]
@@ -60,7 +60,7 @@ def test_partial_binding():
     def fake_test_description(val, arg):
         results.append((val, arg))
 
-    for index, func in enumerate(fake_test_description):
+    for index, (func, params) in enumerate(fake_test_description):
         func(arg=index)
 
     assert results == [('x', 0), ('y', 1), ('z', 2)]
@@ -74,7 +74,7 @@ def test_multiple_args():
     def fake_test_description(arg_1, arg_2):
         results.append((arg_1, arg_2))
 
-    for index, func in enumerate(fake_test_description):
+    for index, (func, params) in enumerate(fake_test_description):
         func()
 
     assert results == [(5, 10), (15, 20), (25, 30)]
23 changes: 11 additions & 12 deletions apex_launchtest/test/test_runner_results.py
@@ -46,13 +46,13 @@ def generate_test_description(ready_fn):
 
     with mock.patch('apex_launchtest.apex_runner._RunnerWorker._run_test'):
         runner = ApexRunner(
-            [TR(generate_test_description, [], [])]
+            [TR(generate_test_description, {}, [], [])]
         )
 
-        pre_result, post_result = runner.run()
+        results = runner.run()
 
-        assert not pre_result.wasSuccessful()
-        assert not post_result.wasSuccessful()
+        for result in results.values():
+            assert not result.wasSuccessful()
 
     # This is the negative version of the test below. If no exit code, no extra output
     # is generated
@@ -93,13 +93,13 @@ def generate_test_description(ready_fn):
 
     with mock.patch('apex_launchtest.apex_runner._RunnerWorker._run_test'):
         runner = ApexRunner(
-            [TR(generate_test_description, [], [])]
+            [TR(generate_test_description, {}, [], [])]
         )
 
-        pre_result, post_result = runner.run()
+        results = runner.run()
 
-        assert not pre_result.wasSuccessful()
-        assert not post_result.wasSuccessful()
+        for result in results.values():
+            assert not result.wasSuccessful()
 
     # Make sure some information about WHY the process died shows up in the output
     out, err = capsys.readouterr()
@@ -151,8 +151,7 @@ def generate_test_description(ready_fn):
         LoadTestsFromPythonModule(module)
     )
 
-    pre_result, post_result = runner.run()
+    results = runner.run()
 
-    assert pre_result.wasSuccessful()
-
-    assert pre_result.wasSuccessful()
+    for result in results.values():
+        assert result.wasSuccessful()
14 changes: 10 additions & 4 deletions apex_launchtest/test/test_xml_output.py
@@ -55,11 +55,17 @@ def test_pre_and_post(self):
         tree = ET.parse(self.xml_file)
         root = tree.getroot()
 
-        self.assertEqual(len(root.getchildren()), 2)
+        self.assertEqual(len(root.getchildren()), 1)
+        test_suite = root.getchildren()[0]
 
-        # Expecting an element called 'active_tests' and 'after_shutdown_tests'
-        child_names = [chld.attrib['name'] for chld in root.getchildren()]
-        self.assertEqual(set(child_names), {'active_tests', 'after_shutdown_tests'})
+        # Expecting an element called 'launch' since this was not parametrized
+        self.assertEqual(test_suite.attrib['name'], 'launch')
+
+        # Drilling down a little further, we expect the class names to show up in the testcase
+        # names
+        case_names = [case.attrib['name'] for case in test_suite.getchildren()]
+        self.assertIn('TestGoodProcess.test_count_to_four', case_names)
+        self.assertIn('TestProcessOutput.test_full_output', case_names)
 
 
 class TestXmlFunctions(unittest.TestCase):
