Skip to content

Commit

Permalink
tooling: Add all pytests checker (#16031)
Browse files Browse the repository at this point in the history
another breakout from #15833

Signed-off-by: Ryan Northey <ryan@synca.io>
  • Loading branch information
phlax authored Apr 22, 2021
1 parent 048f3b8 commit 15d71b0
Show file tree
Hide file tree
Showing 10 changed files with 551 additions and 45 deletions.
9 changes: 1 addition & 8 deletions ci/do_ci.sh
Original file line number Diff line number Diff line change
Expand Up @@ -468,14 +468,7 @@ elif [[ "$CI_TARGET" == "cve_scan" ]]; then
exit 0
elif [[ "$CI_TARGET" == "tooling" ]]; then
echo "Run pytest tooling tests..."
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_pytest -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_coverage -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_checker -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_runner -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_utils -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:pytest_python_check -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:pytest_pip_check -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:python_coverage -- --fail-under=95 /tmp/.coverage-envoy /source/generated/tooling
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:all_pytests -- --cov-html /source/generated/tooling "${ENVOY_SRCDIR}"
exit 0
elif [[ "$CI_TARGET" == "verify_examples" ]]; then
run_ci_verify "*" wasm-cc
Expand Down
9 changes: 5 additions & 4 deletions tools/base/checker.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,11 +222,12 @@ def warn(self, name: str, warnings: list, log: bool = True) -> None:
self.log.warning("\n".join(warnings))


class ForkingChecker(runner.ForkingRunner, Checker):
    """Checker that can fork subprocesses via the inherited `subproc_run`."""
    pass


class BazelChecker(runner.BazelRunner, Checker):
    """Checker that can run bazel targets via the inherited `bazel` adapter."""
    pass


class CheckerSummary(object):
Expand Down
52 changes: 50 additions & 2 deletions tools/base/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,10 @@
("error", logging.ERROR))


class BazelRunError(Exception):
    """Raised when a forked `bazel query` or `bazel run` exits non-zero."""
    pass


class LogFilter(logging.Filter):

def filter(self, rec):
Expand Down Expand Up @@ -80,9 +84,53 @@ def __init__(self, context: Runner):
self.context = context

    def __call__(self, *args, **kwargs) -> subprocess.CompletedProcess:
        # Calling the adapter directly is shorthand for `subproc_run`.
        return self.subproc_run(*args, **kwargs)

    def subproc_run(
            self, *args, capture_output: bool = True, **kwargs) -> subprocess.CompletedProcess:
        """Fork a subprocess, using self.context.path as the cwd by default"""
        # Output is captured unless the caller explicitly opts out; an
        # explicit cwd kwarg wins over the context's path.
        kwargs["cwd"] = kwargs.get("cwd", self.context.path)
        return subprocess.run(*args, capture_output=capture_output, **kwargs)


class BazelAdapter(object):
    """Adapter giving a forking runner `bazel query`/`bazel run` helpers.

    All subprocess work is delegated to the wrapped runner's `subproc_run`.
    """

    def __init__(self, context: "ForkingRunner"):
        # Runner providing `subproc_run` and a default `path` to run in.
        self.context = context

    def query(self, query: str) -> list:
        """Run a bazel query and return stdout as list of lines"""
        # NOTE: the query is wrapped in literal single quotes before being
        # handed to the subprocess runner.
        response = self.context.subproc_run(["bazel", "query", f"'{query}'"])
        if not response.returncode:
            return response.stdout.decode("utf-8").split("\n")
        raise BazelRunError(f"Bazel query failed: {response}")

    def run(
            self,
            target: str,
            *args,
            capture_output: bool = False,
            cwd: str = "",
            raises: bool = True) -> subprocess.CompletedProcess:
        """Run a bazel target and return the subprocess response"""
        # Any extra args are separated from the target with a `--` marker.
        extra_args = ("--",) + args if args else args
        response = self.context.subproc_run(
            ("bazel", "run", target) + extra_args,
            capture_output=capture_output,
            cwd=cwd or self.context.path)
        # A non-zero exit only raises when the caller has not opted out.
        if response.returncode and raises:
            raise BazelRunError(f"Bazel run failed: {response}")
        return response


class ForkingRunner(Runner):
    """Runner that forks subprocesses through a ForkingAdapter."""

    @cached_property
    def subproc_run(self) -> ForkingAdapter:
        # Cached so every fork goes through one adapter bound to this runner.
        return ForkingAdapter(self)


class BazelRunner(ForkingRunner):
    """Forking runner with a cached adapter for bazel query/run commands."""

    @cached_property
    def bazel(self) -> BazelAdapter:
        # Cached so repeated bazel invocations reuse a single adapter.
        return BazelAdapter(self)
45 changes: 30 additions & 15 deletions tools/base/tests/test_checker.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@

import pytest

from tools.base.checker import Checker, CheckerSummary, ForkingChecker
from tools.base.checker import BazelChecker, Checker, CheckerSummary, ForkingChecker
from tools.base.runner import BazelRunner, ForkingRunner


class DummyChecker(Checker):
Expand All @@ -11,6 +12,18 @@ def __init__(self):
self.args = PropertyMock()


class DummyForkingChecker(ForkingChecker):
    """Test double that bypasses the base constructor (no CLI parsing)."""

    def __init__(self):
        # Stand-in for the parsed-args property the Checker machinery reads.
        self.args = PropertyMock()


class DummyBazelChecker(BazelChecker):
    """Test double that bypasses the base constructor (no CLI parsing)."""

    def __init__(self):
        # Stand-in for the parsed-args property the Checker machinery reads.
        self.args = PropertyMock()


class DummyCheckerWithChecks(Checker):
checks = ("check1", "check2")

Expand Down Expand Up @@ -497,20 +510,6 @@ def test_checker_succeed(patches, log, success):
assert not m_log.return_value.info.called


# ForkingChecker tests

def test_forkingchecker_fork():
checker = ForkingChecker("path1", "path2", "path3")
forking_mock = patch("tools.base.checker.runner.ForkingAdapter")

with forking_mock as m_fork:
assert checker.fork == m_fork.return_value
assert (
list(m_fork.call_args)
== [(checker,), {}])
assert "fork" in checker.__dict__


# CheckerSummary tests

def test_checker_summary_constructor():
Expand Down Expand Up @@ -639,3 +638,19 @@ def _extra(prob):
assert (
list(list(c) for c in m_section.call_args_list)
== expected)


# ForkingChecker test

def test_forkingchecker_constructor():
    """A ForkingChecker is simultaneously a forking runner and a checker."""
    instance = DummyForkingChecker()
    for expected_base in (ForkingRunner, Checker):
        assert isinstance(instance, expected_base)


# BazelChecker test

def test_bazelchecker_constructor():
    """A BazelChecker is simultaneously a bazel runner and a checker."""
    instance = DummyBazelChecker()
    for expected_base in (BazelRunner, Checker):
        assert isinstance(instance, expected_base)
159 changes: 145 additions & 14 deletions tools/base/tests/test_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,12 @@ def __init__(self):
self.args = PropertyMock()


class DummyForkingRunner(runner.ForkingRunner):
    """Test double that bypasses the base constructor (no CLI parsing)."""

    def __init__(self):
        # Stand-in for the parsed-args property the runner machinery reads.
        self.args = PropertyMock()


def test_runner_constructor():
run = runner.Runner("path1", "path2", "path3")
assert run._args == ("path1", "path2", "path3")
Expand Down Expand Up @@ -71,8 +77,8 @@ def test_runner_log(patches):
prefix="tools.base.runner")

with patched as (m_logger, m_stream, m_filter, m_level, m_name):
_loggers = (MagicMock(), MagicMock())
m_stream.side_effect = _loggers
loggers = (MagicMock(), MagicMock())
m_stream.side_effect = loggers
assert run.log == m_logger.return_value
assert (
list(m_logger.return_value.setLevel.call_args)
Expand All @@ -82,17 +88,17 @@ def test_runner_log(patches):
== [[(sys.stdout,), {}],
[(sys.stderr,), {}]])
assert (
list(_loggers[0].setLevel.call_args)
list(loggers[0].setLevel.call_args)
== [(logging.DEBUG,), {}])
assert (
list(_loggers[0].addFilter.call_args)
list(loggers[0].addFilter.call_args)
== [(m_filter.return_value,), {}])
assert (
list(_loggers[1].setLevel.call_args)
list(loggers[1].setLevel.call_args)
== [(logging.WARN,), {}])
assert (
list(list(c) for c in m_logger.return_value.addHandler.call_args_list)
== [[(_loggers[0],), {}], [(_loggers[1],), {}]])
== [[(loggers[0],), {}], [(loggers[1],), {}]])
assert "log" in run.__dict__


Expand Down Expand Up @@ -169,18 +175,115 @@ class DummyRecord(object):
assert not logfilter.filter(DummyRecord())


# BazelAdapter tests

def test_bazeladapter_constructor():
    """The adapter records the runner it wraps as its `context`."""
    wrapped_runner = DummyRunner()
    assert runner.BazelAdapter(wrapped_runner).context == wrapped_runner


@pytest.mark.parametrize("query_returns", [0, 1])
def test_bazeladapter_query(query_returns):
    """query() returns stdout lines on success and raises BazelRunError
    when the forked `bazel query` exits non-zero."""
    run = DummyForkingRunner()
    adapter = runner.BazelAdapter(run)
    fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run")

    with fork_mock as m_fork:
        # Simulated subprocess exit code (0 == success).
        m_fork.return_value.returncode = query_returns
        if query_returns:
            with pytest.raises(runner.BazelRunError) as result:
                adapter.query("BAZEL QUERY")
        else:
            result = adapter.query("BAZEL QUERY")

    # The query string is wrapped in literal single quotes and handed to
    # `bazel query` as a list (no shell).
    assert (
        list(m_fork.call_args)
        == [(['bazel', 'query', "'BAZEL QUERY'"],), {}])

    if query_returns:
        # Failure path: the exception carries the response repr and
        # stdout is never decoded.
        assert result.errisinstance(runner.BazelRunError)
        assert (
            result.value.args
            == (f"Bazel query failed: {m_fork.return_value}",))
        assert not m_fork.return_value.stdout.decode.called
    else:
        # Success path: stdout is utf-8 decoded and split into lines.
        assert (
            result
            == m_fork.return_value.stdout.decode.return_value.split.return_value)
        assert (
            list(m_fork.return_value.stdout.decode.call_args)
            == [('utf-8',), {}])
        assert (
            list(m_fork.return_value.stdout.decode.return_value.split.call_args)
            == [('\n',), {}])


@pytest.mark.parametrize("cwd", [None, "", "SOMEPATH"])
@pytest.mark.parametrize("raises", [None, True, False])
@pytest.mark.parametrize("capture_output", [None, True, False])
@pytest.mark.parametrize("run_returns", [0, 1])
@pytest.mark.parametrize("args", [(), ("foo",), ("foo", "bar")])
def test_bazeladapter_run(patches, run_returns, cwd, raises, args, capture_output):
    """run() forwards to subproc_run with `--`-separated extra args and only
    raises BazelRunError when the target fails and `raises` is not False."""
    run = DummyForkingRunner()
    adapter = runner.BazelAdapter(run)
    patched = patches(
        "ForkingAdapter.subproc_run",
        ("ForkingRunner.path", dict(new_callable=PropertyMock)),
        prefix="tools.base.runner")

    # Build kwargs, omitting any parametrized None so the adapter's own
    # defaults are exercised as well.
    adapter_args = ("BAZEL RUN",) + args
    kwargs = {}
    if raises is not None:
        kwargs["raises"] = raises
    if cwd is not None:
        kwargs["cwd"] = cwd
    if capture_output is not None:
        kwargs["capture_output"] = capture_output

    with patched as (m_fork, m_path):
        m_fork.return_value.returncode = run_returns
        # A non-zero exit raises unless raises=False was passed explicitly.
        if run_returns and (raises is not False):
            with pytest.raises(runner.BazelRunError) as result:
                adapter.run(*adapter_args, **kwargs)
        else:
            result = adapter.run(*adapter_args, **kwargs)

    # Extra args are separated from the target with a `--` marker.
    call_args = (("--",) + args) if args else args
    bazel_args = ("bazel", "run", "BAZEL RUN") + call_args
    bazel_kwargs = {}
    # capture_output defaults to False; cwd falls back to the runner's path.
    bazel_kwargs["capture_output"] = (
        True
        if capture_output is True
        else False)
    bazel_kwargs["cwd"] = (
        cwd
        if cwd
        else m_path.return_value)
    assert (
        list(m_fork.call_args)
        == [(bazel_args,), bazel_kwargs])
    if run_returns and (raises is not False):
        assert result.errisinstance(runner.BazelRunError)
        assert (
            result.value.args
            == (f"Bazel run failed: {m_fork.return_value}",))
    else:
        # Success (or raises=False): the subprocess response is returned.
        assert result == m_fork.return_value


# ForkingAdapter tests

def test_forkingadapter_constructor():
    """ForkingAdapter stores the wrapped runner on `context`."""
    wrapped_runner = DummyRunner()
    assert runner.ForkingAdapter(wrapped_runner).context == wrapped_runner


def test_forkingadapter_call():
_runner = DummyRunner()
adapter = runner.ForkingAdapter(_runner)
fork_mock = patch("tools.base.runner.ForkingAdapter.fork")
run = DummyRunner()
adapter = runner.ForkingAdapter(run)
fork_mock = patch("tools.base.runner.ForkingAdapter.subproc_run")

with fork_mock as m_fork:
assert (
Expand All @@ -199,7 +302,7 @@ def test_forkingadapter_call():
@pytest.mark.parametrize("args", [(), ("a", "b")])
@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"])
@pytest.mark.parametrize("capture_output", ["NONE", True, False])
def test_forkingadapter_fork(patches, args, cwd, capture_output):
def test_forkingadapter_subproc_run(patches, args, cwd, capture_output):
adapter = runner.ForkingAdapter(DummyRunner())
patched = patches(
"subprocess.run",
Expand All @@ -212,7 +315,7 @@ def test_forkingadapter_fork(patches, args, cwd, capture_output):
kwargs["cwd"] = cwd
if capture_output != "NONE":
kwargs["capture_output"] = capture_output
assert adapter.fork(*args, **kwargs) == m_run.return_value
assert adapter.subproc_run(*args, **kwargs) == m_run.return_value

expected = {'capture_output': True, 'cwd': cwd}
if capture_output is False:
Expand All @@ -222,3 +325,31 @@ def test_forkingadapter_fork(patches, args, cwd, capture_output):
assert (
list(m_run.call_args)
== [args, expected])


# ForkingRunner tests

def test_forkingrunner_fork():
    """`subproc_run` lazily builds and caches a ForkingAdapter."""
    forking_runner = runner.ForkingRunner("path1", "path2", "path3")

    with patch("tools.base.runner.ForkingAdapter") as m_adapter:
        assert forking_runner.subproc_run == m_adapter.return_value
        assert list(m_adapter.call_args) == [(forking_runner,), {}]
        # cached_property stores the adapter on the instance after first use
        assert "subproc_run" in forking_runner.__dict__


# BazelRunner tests

def test_bazelrunner_bazel():
    """`bazel` lazily builds and caches a BazelAdapter."""
    bazel_runner = runner.BazelRunner("path1", "path2", "path3")

    with patch("tools.base.runner.BazelAdapter") as m_adapter:
        assert bazel_runner.bazel == m_adapter.return_value
        assert list(m_adapter.call_args) == [(bazel_runner,), {}]
        # cached_property stores the adapter on the instance after first use
        assert "bazel" in bazel_runner.__dict__
2 changes: 1 addition & 1 deletion tools/code_format/python_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ def on_check_run(self, check: str) -> None:

    def on_checks_complete(self) -> int:
        """Hook run after all checks: optionally dump a diff, then defer.

        If a diff file path is configured and any check failed, capture
        `git diff HEAD` and write its stdout to that file, then delegate to
        the base class for the summary/exit code.
        """
        if self.diff_file_path and self.has_failed:
            # subproc_run captures output by default, so result.stdout holds
            # the diff as bytes — written without text decoding.
            result = self.subproc_run(["git", "diff", "HEAD"])
            with open(self.diff_file_path, "wb") as f:
                f.write(result.stdout)
        return super().on_checks_complete()
Expand Down
Loading

0 comments on commit 15d71b0

Please sign in to comment.