# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2021 Datadog, Inc.

import json
import os
import time

import pytest
from pytest_jsonreport.plugin import JSONReport

from manifests.parser.core import load as load_manifests
from utils import context
from utils._context._scenarios import scenarios
from utils.tools import logger
from utils.scripts.junit_report import junit_modifyreport
from utils._context.library_version import LibraryVersion
from utils._decorators import released

# Monkey patch JSON-report plugin to avoid noise in report
JSONReport.pytest_terminal_summary = lambda *args, **kwargs: None

# pytest does not keep a trace of deselected items, so we keep it in a global variable
_deselected_items = []


def pytest_addoption(parser):
    parser.addoption(
        "--scenario", "-S", type=str, action="store", default="DEFAULT", help="Unique identifier of scenario"
    )
    parser.addoption("--replay", "-R", action="store_true", help="Replay tests based on logs")
    parser.addoption("--sleep", action="store_true", help="Startup scenario without launching the tests (keep running)")
    parser.addoption(
        "--force-execute", "-F", action="append", default=[], help="Items to execute, even if they are skipped"
    )

    # Onboarding scenarios mandatory parameters
    parser.addoption("--vm-weblog", type=str, action="store", help="Set virtual machine weblog")
    parser.addoption("--vm-library", type=str, action="store", help="Set virtual machine library to test")
    parser.addoption("--vm-env", type=str, action="store", help="Set virtual machine environment")
    parser.addoption("--vm-provider", type=str, action="store", help="Set provider for VMs")
    parser.addoption("--vm-only-branch", type=str, action="store", help="Filter to execute only one vm branch")
    parser.addoption("--vm-skip-branches", type=str, action="store", help="Filter to exclude vm branches")

    # report data to feature parity dashboard
    parser.addoption(
        "--report-run-url", type=str, action="store", default=None, help="URL of the run that produced the report",
    )
    parser.addoption(
        "--report-environment", type=str, action="store", default=None, help="The environment the test is run under",
    )
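
# A hypothetical invocation exercising these options (scenario name, path and environment are illustrative only):
#   pytest -S APPSEC_BLOCKING -F tests/appsec/test_blocking.py --report-environment prod
# -S selects the scenario, -F forces execution of items that would otherwise be skipped,
# and --report-environment/--report-run-url feed the feature parity dashboard export below.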


def pytest_configure(config):
    # handle options that can be filled by environ
    if not config.option.report_environment and "SYSTEM_TESTS_REPORT_ENVIRONMENT" in os.environ:
        config.option.report_environment = os.environ["SYSTEM_TESTS_REPORT_ENVIRONMENT"]

    if not config.option.report_run_url and "SYSTEM_TESTS_REPORT_RUN_URL" in os.environ:
        config.option.report_run_url = os.environ["SYSTEM_TESTS_REPORT_RUN_URL"]

    # First of all, we must get the current scenario
    for name in dir(scenarios):
        if name.upper() == config.option.scenario:
            context.scenario = getattr(scenarios, name)
            break

    if context.scenario is None:
        pytest.exit(f"Scenario {config.option.scenario} does not exist", 1)

    context.scenario.configure(config)

    if not config.option.replay and not config.option.collectonly:
        config.option.json_report_file = f"{context.scenario.host_log_folder}/report.json"
        config.option.xmlpath = f"{context.scenario.host_log_folder}/reportJunit.xml"


# Called at the very beginning
def pytest_sessionstart(session):
    # get the terminal to allow logging directly in stdout
    setattr(logger, "terminal", session.config.pluginmanager.get_plugin("terminalreporter"))

    if session.config.option.sleep:
        logger.terminal.write("\n ********************************************************** \n")
        logger.terminal.write(" *** .:: Sleep mode activated. Press Ctrl+C to exit ::. *** ")
        logger.terminal.write("\n ********************************************************** \n\n")

    if session.config.option.collectonly:
        return

    context.scenario.session_start()


# called when each test item is collected
def _collect_item_metadata(item):
    result = {
        "details": None,
        "testDeclaration": None,
        "features": [marker.kwargs["feature_id"] for marker in item.iter_markers("features")],
    }

    # get the reason from skip before xfail
    markers = [*item.iter_markers("skip"), *item.iter_markers("skipif"), *item.iter_markers("xfail")]
    for marker in markers:
        skip_reason = _get_skip_reason_from_marker(marker)

        if skip_reason is not None:
            # if any irrelevant declaration exists, it is the one we need to expose
            if skip_reason.startswith("irrelevant"):
                result["details"] = skip_reason

            # otherwise, we keep the first one we found
            elif result["details"] is None:
                result["details"] = skip_reason

    if result["details"]:
        logger.debug(f"{item.nodeid} => {result['details']} => skipped")

        if result["details"].startswith("irrelevant"):
            result["testDeclaration"] = "irrelevant"
        elif result["details"].startswith("flaky"):
            result["testDeclaration"] = "flaky"
        elif result["details"].startswith("bug"):
            result["testDeclaration"] = "bug"
        elif result["details"].startswith("missing_feature"):
            result["testDeclaration"] = "notImplemented"
        else:
            raise ValueError(f"Unexpected test declaration for {item.nodeid} : {result['details']}")

    return result
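
# For illustration only: a test marked with @pytest.mark.skip(reason="irrelevant: not supported")
# and @pytest.mark.features(feature_id=42) would roughly produce
#   {"details": "irrelevant: not supported", "testDeclaration": "irrelevant", "features": [42]}
# (the feature_id value above is hypothetical).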


def _get_skip_reason_from_marker(marker):
    if marker.name == "skipif":
        if all(marker.args):
            return marker.kwargs.get("reason", "")
    elif marker.name in ("skip", "xfail"):
        if len(marker.args):  # if un-named arguments are present, the first one is the reason
            return marker.args[0]

        # otherwise, search in named arguments
        return marker.kwargs.get("reason", "")

    return None
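
# Examples of how reasons are extracted (the markers below are illustrative, not real declarations):
#   @pytest.mark.skip("missing_feature: not yet implemented")    -> "missing_feature: not yet implemented"
#   @pytest.mark.xfail(reason="bug: some ticket reference")      -> "bug: some ticket reference"
#   @pytest.mark.skipif(condition, reason="irrelevant: N/A")     -> the reason, only if all args are truthy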


def pytest_pycollect_makemodule(module_path, parent):
    # For now, declarations only work for tracers at module level

    library = context.scenario.library.library

    manifests = load_manifests()

    nodeid = str(module_path.relative_to(module_path.cwd()))

    if nodeid in manifests and library in manifests[nodeid]:
        declaration = manifests[nodeid][library]
        logger.info(f"Manifest declaration found for {nodeid}: {declaration}")

        mod: pytest.Module = pytest.Module.from_parent(parent, path=module_path)

        if declaration.startswith("irrelevant") or declaration.startswith("flaky"):
            mod.add_marker(pytest.mark.skip(reason=declaration))
            logger.debug(f"Module {nodeid} is skipped by manifest file because {declaration}")
        else:
            mod.add_marker(pytest.mark.xfail(reason=declaration))
            logger.debug(f"Module {nodeid} is xfailed by manifest file because {declaration}")

        return mod
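
# For illustration (format inferred from this hook, not taken from a real manifest): an entry mapping
#   "tests/appsec/test_blocking.py" -> {"python": "missing_feature (not yet implemented)"}
# would xfail the whole module for the Python tracer, while an "irrelevant ..." or "flaky ..."
# declaration would skip it instead.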


@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector, name, obj):
    if collector.istestclass(obj, name):
        manifest = load_manifests()

        nodeid = f"{collector.nodeid}::{name}"

        if nodeid in manifest:
            declaration = manifest[nodeid]
            logger.info(f"Manifest declaration found for {nodeid}: {declaration}")

            released(**declaration)(obj)


def pytest_collection_modifyitems(session, config, items):
    """unselect items that are not included in the current scenario"""

    logger.debug("pytest_collection_modifyitems")

    selected = []
    deselected = []

    for item in items:
        scenario_markers = list(item.iter_markers("scenario"))
        declared_scenario = scenario_markers[0].args[0] if len(scenario_markers) != 0 else "DEFAULT"

        # If we are running the scenario with the sleep option, we deselect everything
        if session.config.option.sleep:
            deselected.append(item)
            continue

        if declared_scenario == context.scenario.name:
            logger.info(f"{item.nodeid} is included in {context.scenario}")
            selected.append(item)

            for forced in config.option.force_execute:
                if item.nodeid.startswith(forced):
                    logger.info(f"{item.nodeid} is normally skipped, but forced thanks to -F {forced}")
                    item.own_markers = [m for m in item.own_markers if m.name not in ("skip", "skipif")]

        else:
            logger.debug(f"{item.nodeid} is not included in {context.scenario}")
            deselected.append(item)

    items[:] = selected
    config.hook.pytest_deselected(items=deselected)


def pytest_deselected(items):
    _deselected_items.extend(items)


def _item_is_skipped(item):
    return any(item.iter_markers("skip"))


def pytest_collection_finish(session):
    from utils import weblog

    if session.config.option.collectonly:
        return

    if session.config.option.sleep:  # in sleep mode, we simply sleep, without running any test or setup
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:  # catching ctrl+C
            context.scenario.close_targets()
            return
        except Exception as e:
            raise e

    last_item_file = ""
    for item in session.items:

        if _item_is_skipped(item):
            continue

        if not item.instance:  # item is not a method bound to a class
            continue

        # the test method name is like test_xxxx
        # we replace test_ by setup_, and call it if it exists
        setup_method_name = f"setup_{item.name[5:]}"

        if not hasattr(item.instance, setup_method_name):
            continue

        item_file = item.nodeid.split(":", 1)[0]
        if last_item_file != item_file:
            if len(last_item_file) == 0:
                logger.terminal.write_sep("-", "tests setup", bold=True)

            logger.terminal.write(f"\n{item_file} ")
            last_item_file = item_file

        setup_method = getattr(item.instance, setup_method_name)
        logger.debug(f"Call {setup_method} for {item}")
        try:
            weblog.current_nodeid = item.nodeid
            setup_method()
            weblog.current_nodeid = None
        except Exception:
            logger.exception("Unexpected failure during setup method call")
            logger.terminal.write("x", bold=True, red=True)
            context.scenario.close_targets()
            raise
        else:
            logger.terminal.write(".", bold=True, green=True)
        finally:
            weblog.current_nodeid = None

    logger.terminal.write("\n\n")

    context.scenario.post_setup()


def pytest_runtest_call(item):
    from utils import weblog

    if item.nodeid in weblog.responses:
        for response in weblog.responses[item.nodeid]:
            request = response["request"]
            if "method" in request:
                logger.info(f"weblog {request['method']} {request['url']} -> {response['status_code']}")
            else:
                logger.info("weblog GRPC request")


@pytest.hookimpl(optionalhook=True)
def pytest_json_runtest_metadata(item, call):

    if call.when != "setup":
        return {}

    return _collect_item_metadata(item)


def pytest_json_modifyreport(json_report):

    try:
        # add useful data for reporting
        json_report["context"] = context.serialize()

        logger.debug("Modifying JSON report finished")

    except Exception:
        logger.error("Failed to modify JSON report", exc_info=True)


def pytest_sessionfinish(session, exitstatus):

    context.scenario.pytest_sessionfinish(session)

    if session.config.option.collectonly or session.config.option.replay:
        return

    # xdist: pytest_sessionfinish runs at the end of all tests. Checking for the workerinput attribute
    # ensures this block runs only in the master process, after all workers have finished testing.
    if not hasattr(session.config, "workerinput"):
        with open(f"{context.scenario.host_log_folder}/known_versions.json", "w", encoding="utf-8") as f:
            json.dump(
                {library: sorted(versions) for library, versions in LibraryVersion.known_versions.items()}, f, indent=2,
            )

        data = session.config._json_report.report  # pylint: disable=protected-access

        junit_modifyreport(
            data, session.config.option.xmlpath, junit_properties=context.scenario.get_junit_properties(),
        )

        export_feature_parity_dashboard(session, data)


def export_feature_parity_dashboard(session, data):

    result = {
        "runUrl": session.config.option.report_run_url or "https://github.com/DataDog/system-tests",
        "runDate": data["created"],
        "environment": session.config.option.report_environment or "local",
        "testSource": "systemtests",
        "language": context.scenario.library.library,
        "variant": context.scenario.weblog_variant,
        "testedDependencies": [
            {"name": name, "version": str(version)} for name, version in context.scenario.components.items()
        ],
        "scenario": context.scenario.name,
        "tests": [convert_test_to_feature_parity_model(test) for test in data["tests"]],
    }
    context.scenario.customize_feature_parity_dashboard(result)

    with open(f"{context.scenario.host_log_folder}/feature_parity.json", "w", encoding="utf-8") as f:
        json.dump(result, f, indent=2)


def convert_test_to_feature_parity_model(test):
    result = {
        "path": test["nodeid"],
        "lineNumber": test["lineno"],
        "outcome": test["outcome"],
        "testDeclaration": test["metadata"]["testDeclaration"],
        "details": test["metadata"]["details"],
        "features": test["metadata"]["features"],
    }

    return result


## Fixtures corner
@pytest.fixture(scope="session", name="session")
def fixture_session(request):
    return request.session


@pytest.fixture(scope="session", name="deselected_items")
def fixture_deselected_items():
    return _deselected_items
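
# A minimal sketch of consuming these fixtures from a test (hypothetical, for illustration only):
#   def test_collection_summary(session, deselected_items):
#       logger.info(f"{len(session.items)} items selected, {len(deselected_items)} deselected")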