Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP] python: Integrate linting/utils/tests #15833

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 1 addition & 7 deletions ci/do_ci.sh
Original file line number Diff line number Diff line change
Expand Up @@ -468,13 +468,7 @@ elif [[ "$CI_TARGET" == "cve_scan" ]]; then
exit 0
elif [[ "$CI_TARGET" == "tooling" ]]; then
echo "Run pytest tooling tests..."
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_pytest -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:pytest_python_coverage -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_checker -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_runner -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/base:pytest_utils -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:pytest_python_check -- --cov-collect /tmp/.coverage-envoy
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:python_coverage -- --fail-under=95 /tmp/.coverage-envoy /source/generated/tooling
bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/testing:all_pytests -- --cov-html /source/generated/tooling "${ENVOY_SRCDIR}"
exit 0
elif [[ "$CI_TARGET" == "verify_examples" ]]; then
run_ci_verify "*" wasm-cc
Expand Down
29 changes: 29 additions & 0 deletions tools/base/checker.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@
from tools.base import runner


class BazelRunError(Exception):
    """Raised when a forked ``bazel`` subprocess exits with a non-zero returncode."""
    pass


class Checker(runner.Runner):
"""Runs check methods prefixed with `check_` and named in `self.checks`

Expand Down Expand Up @@ -229,6 +233,31 @@ def fork(self):
return runner.ForkingAdapter(self)


class BazelChecker(ForkingChecker):
    """Checker that shells out to ``bazel`` via the inherited ``fork`` helper."""

    def bazel_query(self, query: str) -> list:
        """Run a bazel query and return stdout as list of lines.

        Raises:
            BazelRunError: if the bazel subprocess exits non-zero.
        """
        # NOTE(review): `fork` runs the argv list without a shell, so the
        # literal single quotes wrapped around the query reach bazel as part
        # of the argument itself — confirm bazel tolerates/strips them.
        resp = self.fork(["bazel", "query", f"'{query}'"])
        if resp.returncode:
            raise BazelRunError(f"Bazel query failed: {resp}")
        # `fork` captures output by default (see test_forkingchecker_fork),
        # so `stdout` is bytes here.
        return resp.stdout.decode("utf-8").split("\n")

    def bazel_run(
            self,
            target: str,
            *args,
            capture_output: bool = False,
            cwd: str = "",
            raises: bool = True) -> subprocess.CompletedProcess:
        """Run a bazel target and return the subprocess response.

        Extra positional ``args`` are forwarded to the target after ``--``.
        ``cwd`` falls back to ``self.path`` when empty. When ``raises`` is
        True (the default), a non-zero exit raises ``BazelRunError``.
        """
        # Only insert the `--` separator when there are extra args to pass.
        args = (("--",) + args) if args else args
        bazel_args = ("bazel", "run", target) + args
        resp = self.fork(bazel_args, capture_output=capture_output, cwd=cwd or self.path)
        if resp.returncode and raises:
            raise BazelRunError(f"Bazel run failed: {resp}")
        return resp


class CheckerSummary(object):

def __init__(self, checker: Checker):
Expand Down
116 changes: 115 additions & 1 deletion tools/base/tests/test_checker.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@

import pytest

from tools.base.checker import Checker, CheckerSummary, ForkingChecker
from tools.base.checker import (
BazelChecker, BazelRunError,
Checker, CheckerSummary, ForkingChecker)


class DummyChecker(Checker):
Expand Down Expand Up @@ -511,6 +513,118 @@ def test_forkingchecker_fork():
assert "fork" in checker.__dict__


@pytest.mark.parametrize("args", [(), ("a", "b")])
@pytest.mark.parametrize("cwd", [None, "NONE", "PATH"])
@pytest.mark.parametrize("capture_output", ["NONE", True, False])
def test_forkingchecker_fork(patches, args, cwd, capture_output):
    """`ForkingChecker.fork` should proxy to `subprocess.run`, defaulting
    `capture_output` to True and `cwd` to the checker's path when omitted.
    """
    checker = ForkingChecker("path1", "path2", "path3")
    patched = patches(
        "subprocess.run",
        ("Checker.path", dict(new_callable=PropertyMock)),
        prefix="tools.base.checker")

    # "NONE" marks a kwarg as omitted (None/False are real values here).
    fork_kwargs = {}
    if cwd != "NONE":
        fork_kwargs["cwd"] = cwd
    if capture_output != "NONE":
        fork_kwargs["capture_output"] = capture_output

    with patched as (m_run, m_path):
        result = checker.fork(*args, **fork_kwargs)

    assert result == m_run.return_value
    expected_kwargs = dict(
        capture_output=(False if capture_output is False else True),
        cwd=(m_path.return_value if cwd == "NONE" else cwd))
    assert list(m_run.call_args) == [args, expected_kwargs]


# BazelChecker tests

@pytest.mark.parametrize("returns", [0, 1])
def test_bazelchecker_bazel_query(returns):
    """`bazel_query` forks a `bazel query` command, raising `BazelRunError`
    on failure and splitting decoded stdout into lines on success.
    """
    checker = BazelChecker("path1", "path2", "path3")

    with patch("tools.base.checker.ForkingChecker.fork") as m_fork:
        m_fork.return_value.returncode = returns
        if not returns:
            result = checker.bazel_query("QUERY")
        else:
            with pytest.raises(BazelRunError) as e:
                checker.bazel_query("QUERY")
            assert e.value.args == (f"Bazel query failed: {m_fork.return_value}",)
        # The query is always forked, whatever the outcome.
        assert list(m_fork.call_args) == [(['bazel', 'query', "'QUERY'"],), {}]
        if returns:
            return
        stdout = m_fork.return_value.stdout
        assert result == stdout.decode.return_value.split.return_value
        assert list(stdout.decode.call_args) == [('utf-8',), {}]
        assert list(stdout.decode.return_value.split.call_args) == [('\n',), {}]


@pytest.mark.parametrize("args", [(), ("foo", ), ("foo", "bar", "baz")])
@pytest.mark.parametrize("raises", [None, True, False])
@pytest.mark.parametrize("returns", [0, 1])
@pytest.mark.parametrize("cwd", [None, "", "PATH"])
@pytest.mark.parametrize("capture_output", [None, True, False])
def test_bazelchecker_bazel_run(patches, args, raises, returns, cwd, capture_output):
    """`bazel_run` forks `bazel run <target> [-- <args>]`, defaulting `cwd`
    to the checker path and raising on failure unless `raises=False`.
    """
    checker = BazelChecker("path1", "path2", "path3")
    patched = patches(
        "ForkingChecker.fork",
        ("Checker.path", dict(new_callable=PropertyMock)),
        prefix="tools.base.checker")
    # None marks a kwarg as omitted so the method's defaults kick in.
    kwargs = {
        key: value
        for key, value in (
            ("cwd", cwd), ("raises", raises), ("capture_output", capture_output))
        if value is not None}

    with patched as (m_fork, m_path):
        m_fork.return_value.returncode = returns
        # Raises on non-zero exit unless explicitly told not to.
        if returns and raises is not False:
            with pytest.raises(BazelRunError) as e:
                checker.bazel_run("TARGET", *args, **kwargs)
            assert e.value.args == (f"Bazel run failed: {m_fork.return_value}",)
        else:
            assert (
                checker.bazel_run("TARGET", *args, **kwargs)
                == m_fork.return_value)
        expected_cmd = ("bazel", "run", "TARGET")
        if args:
            expected_cmd += ("--",) + args
        assert list(m_fork.call_args) == [
            (expected_cmd,),
            {"capture_output": capture_output or False,
             "cwd": cwd or m_path.return_value}]


# CheckerSummary tests

def test_checker_summary_constructor():
Expand Down
1 change: 1 addition & 0 deletions tools/base/tests/test_utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import importlib
import sys
from contextlib import contextmanager
from unittest.mock import patch

import pytest

Expand Down
8 changes: 4 additions & 4 deletions tools/dependency/BUILD
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
load("@rules_python//python:defs.bzl", "py_binary")
load("@deps_pip3//:requirements.bzl", "requirement")
load("//bazel:envoy_build_system.bzl", "envoy_package")
load("//tools/base:envoy_python.bzl", "envoy_py_binary")

licenses(["notice"]) # Apache 2

Expand Down Expand Up @@ -46,11 +47,10 @@ py_binary(
],
)

py_binary(
name = "pip_check",
srcs = ["pip_check.py"],
visibility = ["//visibility:public"],
envoy_py_binary(
name = "tools.dependency.pip_check",
deps = [
"//tools/base:checker",
requirement("PyYaml"),
],
)
101 changes: 53 additions & 48 deletions tools/dependency/pip_check.py
Original file line number Diff line number Diff line change
@@ -1,44 +1,36 @@
#!/usr/bin/python3
#!/usr/bin/env python3

# usage
#
# with bazel:
#
# bazel //tools/dependency:pip_check -- -h
#
# alternatively, if you have the necessary python deps available
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

no deps required

#
# ./tools/dependency/pip_check.py -h
#

import os
import sys
from functools import cached_property

import yaml

from tools.base import checker

DEPENDABOT_CONFIG = ".github/dependabot.yml"
REQUIREMENTS_FILENAME = "requirements.txt"

# TODO(phlax): add checks for:
# - requirements can be installed together
# - pip-compile formatting (maybe do that in code_format)


# TODO(phlax): move this to a base module
class Checker(object):
"""Runs check methods prefixed with `check_` and named in `self.checks`

check methods should return the count of errors and handle writing to stderr
"""
checks = ()

def __init__(self, path: str):
self.path = path

def error_lines(self, errors: list) -> str:
"""Transform a list of errors to a string for output"""
return "\n - ".join([""] + errors)

def run_checks(self) -> int:
"""Run all configured checks and return the sum of their error counts"""
return sum(getattr(self, f"check_{check}")() for check in self.checks)
# - pip-compile formatting

def write_errors(self, errors: list, pre: str, post: str) -> None:
"""Write errors to stderr with pre/post ambles"""
sys.stderr.write(f"\n{pre}: \n{self.error_lines(errors)}\n\n{post}\n\n")


class PipChecker(Checker):
class PipChecker(checker.Checker):
checks = ("dependabot",)
_dependabot_config = DEPENDABOT_CONFIG
_requirements_filename = REQUIREMENTS_FILENAME

@cached_property
def config_requirements(self) -> set:
Expand All @@ -51,44 +43,57 @@ def config_requirements(self) -> set:
@cached_property
def dependabot_config(self) -> dict:
"""Parsed dependabot config"""
with open(os.path.join(self.path, DEPENDABOT_CONFIG)) as f:
with open(os.path.join(self.path, self.dependabot_config_path)) as f:
return yaml.safe_load(f.read())

    @property
    def dependabot_config_path(self) -> str:
        """Dependabot config file path, joined to ``self.path`` when opened."""
        return self._dependabot_config

@cached_property
def requirements_dirs(self) -> set:
"""Set of found directories in the repo containing requirements.txt"""
return set(
root[len(self.path):]
for root, dirs, files in os.walk(self.path)
if "requirements.txt" in files)
if self.requirements_filename in files)

    @property
    def requirements_filename(self) -> str:
        """Filename searched for when discovering requirements directories."""
        return self._requirements_filename

def check_dependabot(self) -> int:
def check_dependabot(self) -> None:
"""Check that dependabot config matches requirements.txt files found in repo"""
missing_dirs = self.config_requirements - self.requirements_dirs
missing_config = self.requirements_dirs - self.config_requirements

correct = self.requirements_dirs & self.config_requirements
if correct:
self.dependabot_success(correct)
if missing_dirs:
self.dependabot_errors(
missing_dirs,
f"Missing {self.requirements_filename} dir, specified in dependabot config")
if missing_config:
self.write_errors(
sorted(missing_config), "Missing requirements config for",
"Either add the missing config or remove the file/s")
self.dependabot_errors(
missing_config,
f"Missing dependabot config for {self.requirements_filename} in dir")

if missing_dirs:
self.write_errors(
sorted(missing_dirs), "Missing requirements files for",
"Either add the missing file/s or remove the config")
def dependabot_success(self, correct: list) -> None:
self.succeed(
"dependabot", ([
f"Correct dependabot config for {self.requirements_filename} in dir: {dirname}"
for dirname in sorted(correct)
]))

return len(missing_config | missing_dirs)
def dependabot_errors(self, missing: list, msg: str) -> None:
self.error(
"dependabot",
([f"[ERROR:{self.name}] (dependabot) {msg}: {dirname}" for dirname in sorted(missing)]))


def main(path: str) -> None:
errors = PipChecker(path).run_checks()
if errors:
raise SystemExit(f"Pip checks failed: {errors} errors")
def main(*args) -> int:
    """Run the pip checker, forwarding CLI args to `PipChecker`, and return
    its exit code (the error count from `run_checks`).
    """
    return PipChecker(*args).run_checks()


if __name__ == "__main__":
try:
path = sys.argv[1]
except IndexError:
raise SystemExit("Pip check tool must be called with a path argument")
main(path)
sys.exit(main(*sys.argv[1:]))
Loading