CORE-14 Add FIPS failure flag #33

Merged · 2 commits · Mar 27, 2024
2 changes: 1 addition & 1 deletion ducktape/mark/__init__.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ._mark import parametrize, matrix, defaults, ignore, ok_to_fail, parametrized, ignored, oked_to_fail, env, is_env # NOQA
from ._mark import parametrize, matrix, defaults, ignore, ok_to_fail, parametrized, ignored, oked_to_fail, env, is_env, ok_to_fail_fips, oked_to_fail_fips # NOQA
53 changes: 53 additions & 0 deletions ducktape/mark/_mark.py
@@ -125,6 +125,24 @@ def apply(self, seed_context, context_list):
return context_list


class OkToFailFIPS(Mark):
"""Run the test but categorize status as OPASSFIPS or OFAILFIPS instead of PASS or FAIL."""

def __init__(self):
super(OkToFailFIPS, self).__init__()
self.injected_args = None

@property
def name(self):
return "OK_TO_FAIL_FIPS"

def apply(self, seed_context, context_list):
assert len(context_list) > 0, "ok_to_fail_fips annotation is not being applied to any test cases"
for ctx in context_list:
ctx.ok_to_fail_fips = ctx.ok_to_fail_fips or self.injected_args is None
return context_list


class Matrix(Mark):
"""Parametrize with a matrix of arguments.
Assume each value in self.injected_args is iterable
@@ -242,6 +260,7 @@ def __eq__(self, other):
DEFAULTS = Defaults()
IGNORE = Ignore()
OK_TO_FAIL = OkToFail()
OK_TO_FAIL_FIPS = OkToFailFIPS()
ENV = Env()


@@ -264,6 +283,11 @@ def oked_to_fail(f):
return Mark.marked(f, OK_TO_FAIL)


def oked_to_fail_fips(f):
"""Is this function or object decorated with @ok_to_fail_fips?"""
return Mark.marked(f, OK_TO_FAIL_FIPS)


def is_env(f):
return Mark.marked(f, ENV)

@@ -459,6 +483,35 @@ def the_test(...):
return args[0]


def ok_to_fail_fips(*args, **kwargs):
"""
Test method decorator which signals the test runner to run the test but record its status as OPASSFIPS or
OFAILFIPS instead of PASS or FAIL. The mark is applied only when the operating system is actually running in
FIPS mode; otherwise no mark is made and the test runs as normal.

Example::
@ok_to_fail_fips
def the_test(...):
...
"""
def running_fips() -> bool:
fips_file = "/proc/sys/crypto/fips_enabled"
if os.path.isfile(fips_file):
with open(fips_file, 'r') as f:
contents = f.read().strip()
return contents == '1'

return False

if len(args) == 1 and len(kwargs) == 0 and running_fips():
# this corresponds to the usage of the decorator with no arguments
# @ok_to_fail_fips
# def test_function:
# ...
Mark.mark(args[0], OkToFailFIPS())
return args[0]


def env(**kwargs):
def environment(f):
Mark.mark(f, Env(**kwargs))
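For reviewers, a minimal sketch of how the new mark is meant to be used (the test class and method below are hypothetical; only ok_to_fail_fips comes from this change):

from ducktape.mark import ok_to_fail_fips

class CryptoCompatTest:  # hypothetical test class
    # The decorator probes /proc/sys/crypto/fips_enabled at import time;
    # on a non-FIPS host it is a no-op and the test keeps PASS/FAIL semantics.
    @ok_to_fail_fips
    def test_legacy_cipher(self):
        ...  # exercise behavior expected to differ under FIPS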
8 changes: 8 additions & 0 deletions ducktape/templates/report/report.css
@@ -86,6 +86,14 @@ h1, h2, h3, h4, h5, h6 {
background-color: #9cf;
}

.ofailfips {
background-color: #ffc;
}

.opassfips {
background-color: #9cf;
}

.testcase {
margin-left: 2em;
}
12 changes: 12 additions & 0 deletions ducktape/templates/report/report.html
@@ -14,6 +14,8 @@
<div id="flaky_test_panel"></div>
<div id="opassed_test_panel"></div>
<div id="ofailed_test_panel"></div>
<div id="opassedfips_test_panel"></div>
<div id="ofailedfips_test_panel"></div>
<div id="passed_test_panel"></div>
<script type="text/jsx">
/* This small block makes it possible to use React dev tools in the Chrome browser */
@@ -46,6 +48,8 @@ <h1>
<td colSpan='5' align='center'>{this.props.summary_prop.ignored}</td>
<td colSpan='5' align='center'>{this.props.summary_prop.opassed}</td>
<td colSpan='5' align='center'>{this.props.summary_prop.ofailed}</td>
<td colSpan='5' align='center'>{this.props.summary_prop.opassedfips}</td>
<td colSpan='5' align='center'>{this.props.summary_prop.ofailedfips}</td>
<td colSpan='5' align='center'>{this.props.summary_prop.run_time}</td>
</tr>
);
@@ -65,6 +69,8 @@ <h1>
<th colSpan='5' align='center'>Ignored</th>
<th colSpan='5' align='center'>OPassed</th>
<th colSpan='5' align='center'>OFailed</th>
<th colSpan='5' align='center'>OPassedFIPS</th>
<th colSpan='5' align='center'>OFailedFIPS</th>
<th colSpan='5' align='center'>Time</th>
</tr>
</thead>
@@ -191,6 +197,8 @@ <h2>{this.props.title}</h2>
"ignored": %(num_ignored)d,
"opassed": %(num_opassed)d,
"ofailed": %(num_ofailed)d,
"opassedfips": %(num_opassedfips)d,
"ofailedfips": %(num_ofailedfips)d,
"run_time": '%(run_time)s'
}];

@@ -207,6 +215,8 @@ <h2>{this.props.title}</h2>
IGNORED_TESTS=[%(ignored_tests)s];
OPASSED_TESTS=[%(opassed_tests)s];
OFAILED_TESTS=[%(ofailed_tests)s];
OPASSEDFIPS_TESTS=[%(opassedfips_tests)s];
OFAILEDFIPS_TESTS=[%(ofailedfips_tests)s];

React.render(<Heading heading={HEADING}/>, document.getElementById('heading'));
React.render(<ColorKeyPanel test_status_names={COLOR_KEYS}/>, document.getElementById('color_key_panel'));
@@ -216,6 +226,8 @@ <h2>{this.props.title}</h2>
React.render(<TestPanel title="Flaky Tests" tests={FLAKY_TESTS}/>, document.getElementById('flaky_test_panel'));
React.render(<TestPanel title="OPassed Tests" tests={OPASSED_TESTS}/>, document.getElementById('opassed_test_panel'));
React.render(<TestPanel title="OFailed Tests" tests={OFAILED_TESTS}/>, document.getElementById('ofailed_test_panel'));
React.render(<TestPanel title="OPassed FIPS Tests" tests={OPASSEDFIPS_TESTS}/>, document.getElementById('opassedfips_test_panel'));
React.render(<TestPanel title="OFailed FIPS Tests" tests={OFAILEDFIPS_TESTS}/>, document.getElementById('ofailedfips_test_panel'));
React.render(<TestPanel title="Passed Tests" tests={PASSED_TESTS}/>, document.getElementById('passed_test_panel'));
</script>
</body>
35 changes: 30 additions & 5 deletions ducktape/tests/reporter.py
@@ -26,7 +26,7 @@

from ducktape.utils.terminal_size import get_terminal_size
from ducktape.utils.util import ducktape_version
from ducktape.tests.status import PASS, FAIL, IGNORE, FLAKY, OPASS, OFAIL
from ducktape.tests.status import PASS, FAIL, IGNORE, FLAKY, OPASS, OFAIL, OPASSFIPS, OFAILFIPS
from ducktape.json_serializable import DucktapeJSONEncoder


@@ -117,6 +117,8 @@ def footer_string(self):
"ignored: %d" % self.results.num_ignored,
"opassed: %d" % self.results.num_opassed,
"ofailed: %d" % self.results.num_ofailed,
"opassedfips: %d" % self.results.num_opassedfips,
"ofailedfips: %d" % self.results.num_ofailedfips,
"=" * self.width
]

@@ -130,6 +132,8 @@ def report_string(self):
failed = []
ofail = []
opass = []
ofailfips = []
opassfips = []
for result in self.results:
if result.test_status == FAIL:
failed.append(result)
@@ -139,10 +143,14 @@ def report_string(self):
opass.append(result)
elif result.test_status == OFAIL:
ofail.append(result)
elif result.test_status == OPASSFIPS:
opassfips.append(result)
elif result.test_status == OFAILFIPS:
ofailfips.append(result)
else:
passed.append(result)

ordered_results = passed + ignored + failed + opass + ofail
ordered_results = passed + ignored + failed + opass + ofail + opassfips + ofailfips

report_lines = \
[SingleResultReporter(result).result_string() + "\n" + "-" * self.width for result in ordered_results]
@@ -205,9 +213,14 @@ def report(self):
testsuite['skipped'] += 1
elif result.test_status == OFAIL:
testsuite['skipped'] += 1
elif result.test_status == OPASSFIPS:
testsuite['skipped'] += 1
elif result.test_status == OFAILFIPS:
testsuite['skipped'] += 1

total = self.results.num_failed + self.results.num_ignored + self.results.num_ofailed + \
self.results.num_opassed + self.results.num_passed + self.results.num_flaky
self.results.num_opassed + self.results.num_passed + self.results.num_flaky + \
self.results.num_opassedfips + self.results.num_ofailedfips
# Now start building XML document
root = ET.Element('testsuites', attrib=dict(
name="ducktape", time=str(self.results.run_time_seconds),
@@ -230,7 +243,7 @@ def report(self):
name=name, classname=test.cls_name, time=str(test.run_time_seconds),
status=str(test.test_status), assertions=""
))
if test.test_status == FAIL or test.test_status == OFAIL:
if test.test_status == FAIL or test.test_status == OFAIL or test.test_status == OFAILFIPS:
xml_failure = ET.SubElement(xml_testcase, 'failure', attrib=dict(
message=test.summary.splitlines()[0]
))
@@ -297,6 +310,8 @@ def format_report(self):
flaky_result_string = []
opassed_result_string = []
ofailed_result_string = []
opassedfips_result_string = []
ofailedfips_result_string = []

for result in self.results:
json_string = json.dumps(self.format_result(result))
@@ -319,6 +334,12 @@
elif result.test_status == OFAIL:
ofailed_result_string.append(json_string)
ofailed_result_string.append(",")
elif result.test_status == OPASSFIPS:
opassedfips_result_string.append(json_string)
opassedfips_result_string.append(",")
elif result.test_status == OFAILFIPS:
ofailedfips_result_string.append(json_string)
ofailedfips_result_string.append(",")
else:
raise Exception("Unknown test status in report: {}".format(result.test_status.to_json()))

@@ -331,6 +352,8 @@
'num_ignored': self.results.num_ignored,
'num_opassed': self.results.num_opassed,
'num_ofailed': self.results.num_ofailed,
'num_opassedfips': self.results.num_opassedfips,
'num_ofailedfips': self.results.num_ofailedfips,
'run_time': format_time(self.results.run_time_seconds),
'session': self.results.session_context.session_id,
'passed_tests': "".join(passed_result_string),
Expand All @@ -339,8 +362,10 @@ def format_report(self):
'ignored_tests': "".join(ignored_result_string),
'ofailed_tests': "".join(ofailed_result_string),
'opassed_tests': "".join(opassed_result_string),
'ofailedfips_tests': "".join(ofailedfips_result_string),
'opassedfips_tests': "".join(opassedfips_result_string),
'test_status_names': ",".join(["\'%s\'" % str(status) for status in
[PASS, FAIL, IGNORE, FLAKY, OPASS, OFAIL]])
[PASS, FAIL, IGNORE, FLAKY, OPASS, OFAIL, OPASSFIPS, OFAILFIPS]])
}

html = template % args
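A condensed restatement of how the XML reporter treats the new statuses (a sketch distilled from the branches above, not the reporter's actual code): both FIPS statuses are tallied as "skipped" alongside OPASS/OFAIL, and only the failing ones produce a <failure> element.

from ducktape.tests.status import FAIL, OFAIL, OFAILFIPS, OPASS, OPASSFIPS

def xml_treatment(status):
    # Waived statuses (FIPS or otherwise) count toward the suite's skipped tally.
    skipped = status in (OPASS, OFAIL, OPASSFIPS, OFAILFIPS)
    # Any failing status, hard or waived, gets a <failure> child element.
    emits_failure = status in (FAIL, OFAIL, OFAILFIPS)
    return skipped, emits_failure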
12 changes: 11 additions & 1 deletion ducktape/tests/result.py
@@ -21,7 +21,7 @@
from ducktape.tests.reporter import SingleResultFileReporter
from ducktape.utils.local_filesystem_utils import mkdir_p
from ducktape.utils.util import ducktape_version
from ducktape.tests.status import FLAKY, PASS, FAIL, IGNORE, OPASS, OFAIL
from ducktape.tests.status import FLAKY, PASS, FAIL, IGNORE, OPASS, OFAIL, OPASSFIPS, OFAILFIPS


class TestResult(object):
@@ -174,6 +174,14 @@ def num_opassed(self):
def num_ofailed(self):
return len([r for r in self._results if r.test_status == OFAIL])

@property
def num_opassedfips(self):
return len([r for r in self._results if r.test_status == OPASSFIPS])

@property
def num_ofailedfips(self):
return len([r for r in self._results if r.test_status == OFAILFIPS])

@property
def run_time_seconds(self):
if self.start_time < 0:
@@ -232,6 +240,8 @@ def to_json(self):
"num_ignored": self.num_ignored,
"num_opassed": self.num_opassed,
"num_ofailed": self.num_ofailed,
"num_opassedfips": self.num_opassedfips,
"num_ofailedfips": self.num_ofailedfips,
"parallelism": parallelism,
"results": [r for r in self._results]
}
9 changes: 8 additions & 1 deletion ducktape/tests/runner_client.py
@@ -29,7 +29,7 @@
from ducktape.tests.status import FLAKY
from ducktape.tests.test import test_logger, TestContext

from ducktape.tests.result import TestResult, IGNORE, PASS, FAIL, OPASS, OFAIL
from ducktape.tests.result import TestResult, IGNORE, PASS, FAIL, OPASS, OFAIL, OPASSFIPS, OFAILFIPS
from ducktape.utils.local_filesystem_utils import mkdir_p


@@ -185,12 +185,16 @@ def _do_run(self, num_runs):

if self.test_context.ok_to_fail:
test_status = OPASS
elif self.test_context.ok_to_fail_fips:
test_status = OPASSFIPS
else:
test_status = PASS

except BaseException as e:
if self.test_context.ok_to_fail:
test_status = OFAIL
elif self.test_context.ok_to_fail_fips:
test_status = OFAILFIPS
else:
test_status = FAIL
err_trace = self._exc_msg(e)
@@ -249,6 +253,9 @@ def _check_cluster_utilization(self, result, summary):
elif result == OPASS:
self.log(logging.INFO, "OFAIL: " + message)
result = OFAIL
elif result == OPASSFIPS:
self.log(logging.INFO, "OFAILFIPS: " + message)
result = OFAILFIPS
summary += message
else:
self.log(logging.WARN, message)
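The status selection in _do_run reduces to a small decision table; this standalone sketch restates it (not the runner's actual code). Note that ok_to_fail is checked before ok_to_fail_fips, so a test carrying both flags resolves to OPASS/OFAIL:

from ducktape.tests.status import PASS, FAIL, OPASS, OFAIL, OPASSFIPS, OFAILFIPS

def resolve_status(passed, ok_to_fail=False, ok_to_fail_fips=False):
    if passed:
        if ok_to_fail:
            return OPASS
        return OPASSFIPS if ok_to_fail_fips else PASS
    if ok_to_fail:
        return OFAIL
    return OFAILFIPS if ok_to_fail_fips else FAIL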
2 changes: 2 additions & 0 deletions ducktape/tests/status.py
@@ -33,3 +33,5 @@ def to_json(self):
IGNORE = TestStatus("ignore")
OPASS = TestStatus("opass")
OFAIL = TestStatus("ofail")
OPASSFIPS = TestStatus("opassfips")
OFAILFIPS = TestStatus("ofailfips")
11 changes: 7 additions & 4 deletions ducktape/tests/test.py
@@ -28,7 +28,7 @@
from ducktape.services.service_registry import ServiceRegistry
from ducktape.template import TemplateRenderer
from ducktape.mark.resource import CLUSTER_SPEC_KEYWORD, CLUSTER_SIZE_KEYWORD
from ducktape.tests.status import FAIL, OFAIL
from ducktape.tests.status import FAIL, OFAIL, OFAILFIPS


class Test(TemplateRenderer):
@@ -151,7 +151,8 @@ def copy_service_logs(self, test_status):
# Gather locations of logs to collect
node_logs = []
for log_name in log_dirs.keys():
if test_status == FAIL or test_status == OFAIL or self.should_collect_log(log_name, service):
if test_status == FAIL or test_status == OFAIL or test_status == OFAILFIPS or \
self.should_collect_log(log_name, service):
node_logs.append(log_dirs[log_name]["path"])

self.test_context.logger.debug("Preparing to copy logs from %s: %s" %
@@ -305,6 +306,7 @@ def __init__(self, **kwargs):
self.injected_args = kwargs.get("injected_args")
self.ignore = kwargs.get("ignore", False)
self.ok_to_fail = kwargs.get("ok_to_fail", False)
self.ok_to_fail_fips = kwargs.get("ok_to_fail_fips", False)

# cluster_use_metadata is a dict containing information about how this test will use cluster resources
self.cluster_use_metadata = copy.copy(kwargs.get("cluster_use_metadata", {}))
@@ -321,9 +323,10 @@ def __init__(self, **kwargs):
def __repr__(self):
return \
"<module=%s, cls=%s, function=%s, injected_args=%s, file=%s, ignore=%s, " \
"ok_to_fail=%s, cluster_size=%s, cluster_spec=%s>" % \
"ok_to_fail=%s, ok_to_fail_fips=%s cluster_size=%s, cluster_spec=%s>" % \
(self.module, self.cls_name, self.function_name, str(self.injected_args), str(self.file),
str(self.ignore), str(self.ok_to_fail), str(self.expected_num_nodes), str(self.expected_cluster_spec))
str(self.ignore), str(self.ok_to_fail), str(self.ok_to_fail_fips), str(self.expected_num_nodes),
str(self.expected_cluster_spec))

def copy(self, **kwargs):
"""Construct a new TestContext object from another TestContext object
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,5 +1,5 @@
jinja2==2.11.2
boto3==1.26.62
boto3@ git+https://github.com/redpanda-data/boto3@5770b4da8f758ceaa47b6482a19569f29739704e
# jinja2 pulls in MarkupSafe with a > constraint, but we need to constrain it for compatibility
MarkupSafe<2.0.0
pyparsing<3.0.0