Skip to content
This repository has been archived by the owner on Jan 27, 2020. It is now read-only.

Commit

Permalink
Beef up test coverage for new features
Browse files Browse the repository at this point in the history
Signed-off-by: Pete Baughman <pete.baughman@apex.ai>
  • Loading branch information
Pete Baughman committed Apr 8, 2019
1 parent bf0c713 commit d028b62
Show file tree
Hide file tree
Showing 2 changed files with 73 additions and 0 deletions.
59 changes: 59 additions & 0 deletions apex_launchtest/test/test_runner_results.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,15 @@

import imp
import os
import types
import unittest

import ament_index_python
import apex_launchtest
from apex_launchtest.apex_runner import ApexRunner
from apex_launchtest.loader import LoadTestsFromPythonModule
from apex_launchtest.loader import TestRun as TR

import launch
import launch.actions

Expand Down Expand Up @@ -155,3 +159,58 @@ def generate_test_description(ready_fn):

for result in results.values():
assert result.wasSuccessful()


def test_parametrized_run_with_one_failure():
    """Run a parametrized launch description and verify pass/fail bookkeeping.

    Five parametrized cases are launched; arg_val == 2 is made to fail during
    an active (pre-shutdown) test and arg_val == 3 during a post-shutdown test,
    so exactly three runs should succeed and two should fail.
    """

    # Test Data
    @apex_launchtest.parametrize('arg_val', [1, 2, 3, 4, 5])
    def generate_test_description(arg_val, ready_fn):
        proc_path = os.path.join(
            ament_index_python.get_package_prefix('apex_launchtest'),
            'lib/apex_launchtest',
            'good_proc',
        )

        # This is necessary to get unbuffered output from the process under test
        env = os.environ.copy()
        env['PYTHONUNBUFFERED'] = '1'

        return launch.LaunchDescription([
            launch.actions.ExecuteProcess(
                cmd=[proc_path],
                env=env,
            ),
            launch.actions.OpaqueFunction(function=lambda context: ready_fn()),
        ])

    class FakePreShutdownTests(unittest.TestCase):

        def test_fail_on_two(self, proc_output, arg_val):
            proc_output.assertWaitFor('Starting Up')
            assert arg_val != 2

    @apex_launchtest.post_shutdown_test()
    class FakePostShutdownTests(unittest.TestCase):

        def test_fail_on_three(self, arg_val):
            assert arg_val != 3

    # Set up a fake module containing the test data:
    fake_module = types.ModuleType('test_module')
    fake_module.generate_test_description = generate_test_description
    fake_module.FakePreShutdownTests = FakePreShutdownTests
    fake_module.FakePostShutdownTests = FakePostShutdownTests

    # Run the test:
    results = ApexRunner(LoadTestsFromPythonModule(fake_module)).run()

    outcomes = [result.wasSuccessful() for result in results.values()]

    assert outcomes.count(True) == 3  # 1, 4, and 5 should pass
    assert outcomes.count(False) == 2  # 2 fails in an active test, 3 fails in a post-shutdown test
14 changes: 14 additions & 0 deletions apex_launchtest/test/test_xml_output.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import ament_index_python
from apex_launchtest.junitxml import unittestResultsToXml
from apex_launchtest.test_result import FailResult
from apex_launchtest.test_result import TestResult as TR


class TestGoodXmlOutput(unittest.TestCase):
Expand Down Expand Up @@ -82,3 +83,16 @@ def test_fail_results_serialize(self):
# Simple sanity check - see that there's a child element called active_tests
child_names = [chld.attrib['name'] for chld in xml_tree.getroot().getchildren()]
self.assertEqual(set(child_names), {'active_tests'})

def test_multiple_test_results(self):
xml_tree = unittestResultsToXml(
name='multiple_launches',
test_results={
'launch_1': TR(None, True, 1),
'launch_2': TR(None, True, 1),
'launch_3': TR(None, True, 1),
}
)

child_names = [chld.attrib['name'] for chld in xml_tree.getroot().getchildren()]
self.assertEqual(set(child_names), {'launch_1', 'launch_2', 'launch_3'})

0 comments on commit d028b62

Please sign in to comment.