chore(tests): tests recording also generates "results web page"
Martin Novak authored and marnova committed May 30, 2022
1 parent 0388268 commit a69e43d
Showing 4 changed files with 93 additions and 32 deletions.
32 changes: 23 additions & 9 deletions tests/conftest.py
@@ -165,7 +165,7 @@ def client(

 def pytest_sessionstart(session: pytest.Session) -> None:
     ui_tests.read_fixtures()
-    if session.config.getoption("ui") == "test":
+    if session.config.getoption("ui"):
         testreport.clear_dir()


@@ -181,13 +181,19 @@ def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -
         return

     missing = session.config.getoption("ui_check_missing")
-    if session.config.getoption("ui") == "test":
+    test_ui = session.config.getoption("ui")
+
+    if test_ui == "test":
         if missing and ui_tests.list_missing():
             session.exitstatus = pytest.ExitCode.TESTS_FAILED
             ui_tests.write_fixtures_suggestion(missing)
         testreport.index()
-    if session.config.getoption("ui") == "record":
-        ui_tests.write_fixtures(missing)
+    if test_ui == "record":
+        if exitstatus == pytest.ExitCode.OK:
+            ui_tests.write_fixtures(missing)
+        else:
+            ui_tests.write_fixtures_suggestion(missing, only_passed_tests=True)
+        testreport.index()


 def pytest_terminal_summary(
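The net effect: a record run now only updates the committed hash file when every test passed. A failing run writes ui_tests.SUGGESTION_FILE instead, restricted to the tests that passed, and both branches now regenerate the HTML report via testreport.index(). (This assumes the suite is invoked with the --ui=record / --ui=test pytest option implied by the getoption("ui") calls above.)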
@@ -216,6 +222,13 @@ def pytest_terminal_summary(
             print("See", ui_tests.SUGGESTION_FILE)
             println("")

+    if ui_option == "record" and exitstatus != pytest.ExitCode.OK:
+        println(
+            f"\n-------- WARNING! Recording to {ui_tests.HASH_FILE.name} was disabled due to failed tests. --------"
+        )
+        print("See", ui_tests.SUGGESTION_FILE, "for suggestions for ONLY PASSED tests.")
+        println("")
+
     if _should_write_ui_report(exitstatus):
         println("-------- UI tests summary: --------")
         println("Run ./tests/show_results.py to open test summary")
@@ -278,7 +291,7 @@ def pytest_runtest_teardown(item: pytest.Item) -> None:
     Dumps the current UI test report HTML.
     """
-    if item.session.config.getoption("ui") == "test":
+    if item.session.config.getoption("ui"):
         testreport.index()


@@ -298,12 +311,13 @@ def device_handler(client: Client, request: pytest.FixtureRequest) -> None:
     device_handler = BackgroundDeviceHandler(client)
     yield device_handler

-    # if test did not finish, e.g. interrupted by Ctrl+C, the pytest_runtest_makereport
-    # did not create the attribute we need
-    if not hasattr(request.node, "rep_call"):
+    # get call test result
+    test_res = ui_tests.get_last_call_test_result(request)
+
+    if test_res is None:
         return

     # if test finished, make sure all background tasks are done
     finalized_ok = device_handler.check_finalize()
-    if request.node.rep_call.passed and not finalized_ok:  # type: ignore [rep_call must exist]
+    if test_res and not finalized_ok:
         raise RuntimeError("Test did not check result of background task")
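The fixture now reads the call-phase outcome through ui_tests.get_last_call_test_result() (added below in tests/ui_tests/__init__.py), so the guard against a missing rep_call attribute, e.g. after a Ctrl+C interrupt, lives in one place.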
52 changes: 39 additions & 13 deletions tests/ui_tests/__init__.py
@@ -5,7 +5,7 @@
 import shutil
 from contextlib import contextmanager
 from pathlib import Path
-from typing import Dict, Generator, Set
+from typing import Dict, Generator, Optional, Set

 import pytest
 from _pytest.outcomes import Failed
@@ -22,6 +22,7 @@
 FILE_HASHES: Dict[str, str] = {}
 ACTUAL_HASHES: Dict[str, str] = {}
 PROCESSED: Set[str] = set()
+FAILED_TESTS: Set[str] = set()

 # T1/TT, to be set in screen_recording(), as we do not know it beforehand
 # TODO: it is not the cleanest, we could create a class out of this file
@@ -44,9 +45,11 @@ def get_test_name(node_id: str) -> str:

 def _process_recorded(screen_path: Path, test_name: str) -> None:
     # calculate hash
-    FILE_HASHES[test_name] = _hash_files(screen_path)
+    actual_hash = _hash_files(screen_path)
+    FILE_HASHES[test_name] = actual_hash
+    ACTUAL_HASHES[test_name] = actual_hash
     _rename_records(screen_path)
-    PROCESSED.add(test_name)
+    testreport.recorded(screen_path, test_name, actual_hash)


 def _rename_records(screen_path: Path) -> None:
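This hunk carries the commit's headline change: recording no longer just hashes and renames the screens, it also registers them in the results web page via the new testreport.recorded() (defined at the bottom of this diff). PROCESSED.add() moves out of here; screen_recording() below takes over that bookkeeping for both record and test modes.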
@@ -74,8 +77,6 @@ def _get_bytes_from_png(png_file: str) -> bytes:


 def _process_tested(fixture_test_path: Path, test_name: str) -> None:
-    PROCESSED.add(test_name)
-
     actual_path = fixture_test_path / "actual"
     actual_hash = _hash_files(actual_path)
     ACTUAL_HASHES[test_name] = actual_hash
@@ -102,6 +103,15 @@ def _process_tested(fixture_test_path: Path, test_name: str) -> None:
     testreport.passed(fixture_test_path, test_name, actual_hash)


+def get_last_call_test_result(request: pytest.FixtureRequest) -> Optional[bool]:
+    # if test did not finish, e.g. interrupted by Ctrl+C, the pytest_runtest_makereport
+    # did not create the attribute we need
+    if not hasattr(request.node, "rep_call"):
+        return None
+
+    return request.node.rep_call.passed
+
+
 @contextmanager
 def screen_recording(
     client: Client, request: pytest.FixtureRequest
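For context, rep_call is not a pytest built-in: it is conventionally attached by a pytest_runtest_makereport hookwrapper, presumably already present in this suite's conftest.py (it is not part of this diff). A minimal sketch of that standard pytest recipe:

import pytest


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo):
    # run the remaining hook implementations and grab the resulting report
    outcome = yield
    rep = outcome.get_result()
    # attach the report for this phase ("setup", "call" or "teardown") to the
    # test item, producing item.rep_setup / item.rep_call / item.rep_teardown
    setattr(item, "rep_" + rep.when, rep)

If a test never reaches its call phase (for example when the session is interrupted), the attribute is never created, which is exactly the None case the new helper reports.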
@@ -141,10 +151,15 @@ def screen_recording(
         client.init_device()
         client.debug.stop_recording()

-    if test_ui == "record":
-        _process_recorded(screen_path, test_name)
-    else:
-        _process_tested(screens_test_path, test_name)
+    if test_ui:
+        PROCESSED.add(test_name)
+        if get_last_call_test_result(request) is False:
+            FAILED_TESTS.add(test_name)
+
+        if test_ui == "record":
+            _process_recorded(screen_path, test_name)
+        else:
+            _process_tested(screens_test_path, test_name)


 def list_missing() -> Set[str]:
@@ -166,17 +181,28 @@ def write_fixtures(remove_missing: bool) -> None:
     HASH_FILE.write_text(_get_fixtures_content(FILE_HASHES, remove_missing))


-def write_fixtures_suggestion(remove_missing: bool) -> None:
-    SUGGESTION_FILE.write_text(_get_fixtures_content(ACTUAL_HASHES, remove_missing))
+def write_fixtures_suggestion(
+    remove_missing: bool, only_passed_tests: bool = False
+) -> None:
+    SUGGESTION_FILE.write_text(
+        _get_fixtures_content(ACTUAL_HASHES, remove_missing, only_passed_tests)
+    )


-def _get_fixtures_content(fixtures: Dict[str, str], remove_missing: bool) -> str:
+def _get_fixtures_content(
+    fixtures: Dict[str, str], remove_missing: bool, only_passed_tests: bool = False
+) -> str:
     if remove_missing:
         # Not removing the ones for different model
         nonrelevant_cases = {
             f: h for f, h in FILE_HASHES.items() if not f.startswith(f"{MODEL}_")
         }
-        processed_fixtures = {i: fixtures[i] for i in PROCESSED}
+
+        filtered_processed_tests = PROCESSED
+        if only_passed_tests:
+            filtered_processed_tests = PROCESSED - FAILED_TESTS
+
+        processed_fixtures = {i: fixtures[i] for i in filtered_processed_tests}
         fixtures = {**nonrelevant_cases, **processed_fixtures}
     else:
         fixtures = fixtures
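A tiny sketch of the new filtering (test names hypothetical): with only_passed_tests=True, hashes of failed tests never reach the suggestion file.

# hypothetical module state after a partially failed record run
PROCESSED = {"TT_test_send", "TT_test_receive"}
FAILED_TESTS = {"TT_test_receive"}

# only the passed test's hash would be suggested
assert PROCESSED - FAILED_TESTS == {"TT_test_send"}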
21 changes: 15 additions & 6 deletions tests/ui_tests/reporting/html.py
@@ -2,7 +2,7 @@
 import filecmp
 from itertools import zip_longest
 from pathlib import Path
-from typing import Dict, List
+from typing import Dict, List, Optional

 from dominate.tags import a, i, img, table, td, th, tr

@@ -30,7 +30,7 @@ def write(fixture_test_path: Path, doc, filename: str) -> Path:
     return fixture_test_path / filename


-def image(src: Path) -> None:
+def image(src: Path, image_width: Optional[int] = None) -> None:
     with td():
         if src:
             # open image file
@@ -40,17 +40,26 @@ def image(src: Path) -> None:
             # convert output to str
             image = image.decode()
             # img(src=src.relative_to(fixture_test_path))
-            img(src="data:image/png;base64, " + image)
+            img(
+                src="data:image/png;base64, " + image,
+                style=f"width: {image_width}px; image-rendering: pixelated;"
+                if image_width
+                else "",
+            )
         else:
             i("missing")


-def diff_table(left_screens: List[Path], right_screens: List[Path]) -> None:
+def diff_table(
+    left_screens: List[Path],
+    right_screens: List[Path],
+    image_width: Optional[int] = None,
+) -> None:
     for left, right in zip_longest(left_screens, right_screens):
         if left and right and filecmp.cmp(right, left):
             background = "white"
         else:
             background = "red"
         with tr(bgcolor=background):
-            image(left)
-            image(right)
+            image(left, image_width)
+            image(right, image_width)
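A note on the inline style: image-rendering: pixelated makes the browser upscale with nearest-neighbor sampling instead of smoothing, so the small device screenshots stay sharp at the enlarged display width and single-pixel differences remain visible in the diff table.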
20 changes: 16 additions & 4 deletions tests/ui_tests/reporting/testreport.py
@@ -16,6 +16,11 @@
 STYLE = (HERE / "testreport.css").read_text()
 SCRIPT = (HERE / "testreport.js").read_text()
+SCREENSHOTS_WIDTH_PX_TO_DISPLAY = {
+    "T1": 128 * 2,  # original is 128px
+    "TT": 240,  # original is 240px
+    "TR": 128 * 2,  # original is 128px
+}

 ACTUAL_HASHES: Dict[str, str] = {}

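The dict is keyed by the two-character model prefix of the test name, extracted with test_name[:2] in the lookups below; for a hypothetical test_name of "TT_bitcoin_signtx", the display width resolves to 240 px.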
@@ -147,17 +152,24 @@ def failed(
         th("Expected")
         th("Actual")

-        html.diff_table(recorded_screens, actual_screens)
+        html.diff_table(
+            recorded_screens,
+            actual_screens,
+            SCREENSHOTS_WIDTH_PX_TO_DISPLAY[test_name[:2]],
+        )

     return html.write(REPORTS_PATH / "failed", doc, test_name + ".html")


 def passed(fixture_test_path: Path, test_name: str, actual_hash: str) -> Path:
+    copy_tree(str(fixture_test_path / "actual"), str(fixture_test_path / "recorded"))
+
+    return recorded(fixture_test_path / "actual", test_name, actual_hash)
+
+
+def recorded(fixture_test_path: Path, test_name: str, actual_hash: str) -> Path:
     doc = document(title=test_name)
-    actual_path = fixture_test_path / "actual"
-    actual_screens = sorted(actual_path.iterdir())
+    actual_screens = sorted(fixture_test_path.iterdir())

     with doc:
         _header(test_name, actual_hash, actual_hash)
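passed() is reduced to a thin wrapper: it syncs the actual screens over the recorded ones and delegates rendering to the new recorded(), the same function record mode reaches via testreport.recorded(). One code path now produces the results page for passed and freshly recorded tests alike.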
@@ -168,6 +180,6 @@ def passed(fixture_test_path: Path, test_name: str, actual_hash: str) -> Path:
         for screen in actual_screens:
             with tr():
-                html.image(screen)
+                html.image(screen, SCREENSHOTS_WIDTH_PX_TO_DISPLAY[test_name[:2]])

     return html.write(REPORTS_PATH / "passed", doc, test_name + ".html")
