Development: Enhance coverage report script #9153

Merged 9 commits on Aug 5, 2024
Changes from 8 commits
10 changes: 10 additions & 0 deletions supporting_scripts/generate_code_cov_table/README.md
@@ -89,4 +89,14 @@ The output will be separated into client and server sections, each containing a
- Line Coverage: Percentage of lines covered by tests
- Confirmation (assert/expect): A checkbox for manual confirmation of test coverage
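
A hypothetical excerpt of such a table (the file name and coverage value below are made up for illustration; the actual report is generated from the changed files on your branch):

| File | Line Coverage | Confirmation (assert/expect) |
| --- | --- | --- |
| `app/exercises/shared/example.component.ts` | 87.5% | ❌ |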

### Printing

The output can be printed to the command line using the `--print-results` option.
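
For reference, a minimal, self-contained sketch of how such a flag can be wired up with `argparse` (it mirrors the `--print-results` definition added to the script in this PR; the surrounding parser setup is simplified):

```python
import argparse

parser = argparse.ArgumentParser(description="Generate the code coverage table")
parser.add_argument(
    "--print-results",
    action="store_true",
    help="Print the report to console instead of copying to clipboard",
)

# Passing the flag sets args.print_results to True, so the report is printed.
args = parser.parse_args(["--print-results"])
print(args.print_results)  # True
```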

### Linux

On Linux, `pyperclip` requires either `xclip` or `xsel` to be installed.
Alternatively, one of the Python modules `qtpy` or `PyQt5` has to be available.
If no way to copy text to the clipboard is found, the script falls back to printing to stdout.
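
A minimal sketch of that fallback behaviour, assuming `pyperclip` is installed (the helper name `copy_or_print` is illustrative; the script handles this inline in `main` via `pyperclip.PyperclipException`, as shown in the diff below):

```python
import pyperclip


def copy_or_print(report: str) -> None:
    """Copy the report to the clipboard, falling back to stdout if no backend is available."""
    try:
        pyperclip.copy(report)
        print("Code coverage report copied to clipboard.")
    except pyperclip.PyperclipException:
        # Raised on Linux when neither xclip/xsel nor a Qt binding can be found.
        print(report)


copy_or_print("| File | Line Coverage | Confirmation (assert/expect) |")
```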

**You will have to manually adjust the confirmation column for each file!**
236 changes: 159 additions & 77 deletions supporting_scripts/generate_code_cov_table/generate_code_cov_table.py
@@ -20,36 +20,43 @@

repo = "Artemis"
owner = "ls1intum"
repo_path = "../.." # relative to this file
runs_url = f'https://api.github.com/repos/{owner}/{repo}/actions/runs'
runs_url = f"https://api.github.com/repos/{owner}/{repo}/actions/runs"

server_tests_key = 'Coverage Report Server Tests'
client_tests_key = 'Coverage Report Client Tests'
server_tests_key = "Coverage Report Server Tests"
client_tests_key = "Coverage Report Client Tests"


def environ_or_required(key, required=True):
# https://stackoverflow.com/a/45392259/4306257
return (
{'default': os.environ[key]} if key in os.environ and os.environ[key] != ""
else {'required': required}
{"default": os.environ[key]}
if key in os.environ and os.environ[key] != ""
else {"required": required}
)


def download_and_extract_zip(url, headers, key):
if url is None:
return None
try:
response = requests.get(url, headers=headers, stream=True)
response = requests.get(url, headers=headers, stream=True, timeout=10)
response.raise_for_status()
total_size = int(response.headers.get('content-length', 0))
total_size = int(response.headers.get("content-length", 0))
except requests.RequestException:
logging.error(f"Failed to process ZIP file from {url}. The content might not be a valid ZIP format or is corrupted.")
logging.error(
f"Failed to process ZIP file from {url}. The content might not be a valid ZIP format or is corrupted."
)
return None

try:
chunk_size = 1024
data_stream = BytesIO()
with tqdm(total=total_size, unit='B', unit_scale=True, desc="Downloading ZIP for: " + key) as progress_bar:
with tqdm(
total=total_size,
unit="B",
unit_scale=True,
desc="Downloading ZIP for: " + key,
) as progress_bar:
for chunk in response.iter_content(chunk_size=chunk_size):
data_stream.write(chunk)
progress_bar.update(len(chunk))
@@ -65,9 +72,10 @@ def download_and_extract_zip(url, headers, key):

def get_html_content(zip_file_bytes, file_name):
try:
with zipfile.ZipFile(zip_file_bytes, 'r') as zip_ref:
with zip_ref.open(file_name + '.html') as file_data:
return file_data.read().decode('utf-8')
with zipfile.ZipFile(zip_file_bytes, "r") as zip_ref, zip_ref.open(
file_name + ".html"
) as file_data:
return file_data.read().decode("utf-8")
except KeyError:
logging.error(f"File {file_name} not found in the archive.")
return None
@@ -77,30 +85,40 @@ def get_html_content(zip_file_bytes, file_name):


def get_artifacts_of_the_last_completed_run(headers, branch, run_id):
response = requests.get(runs_url, headers=headers, params={'branch': branch})
response = requests.get(
runs_url, headers=headers, params={"branch": branch}, timeout=10
)

if response.status_code == 200:
runs = response.json()['workflow_runs']
runs = response.json()["workflow_runs"]

completed_runs = list(run for run in runs if run['status'] == 'completed' and run['name'] == 'Test')
completed_runs = list(
run
for run in runs
if run["status"] == "completed" and run["name"] == "Test"
)
if len(completed_runs) == 0:
logging.error("No completed test runs found.")
sys.exit(1)

if run_id is None:
latest_completed_workflow = max(completed_runs, key=lambda x: x['created_at'], default=None)
artifacts_url = latest_completed_workflow['artifacts_url']
latest_completed_workflow = max(
completed_runs, key=lambda x: x["created_at"], default=None
)
artifacts_url = latest_completed_workflow["artifacts_url"]
else:
filtered_runs = [run for run in runs if str(run['id']) == str(run_id)]
filtered_runs = [run for run in runs if str(run["id"]) == str(run_id)]
if len(filtered_runs) == 0:
logging.error("No run found with the specified ID.")
sys.exit(1)
elif len(filtered_runs) > 1:
logging.error("Multiple runs found with the same ID. Using the first one.")
artifacts_url = filtered_runs[0]['artifacts_url']
logging.error(
"Multiple runs found with the same ID. Using the first one."
)
artifacts_url = filtered_runs[0]["artifacts_url"]

response = requests.get(artifacts_url, headers=headers)
artifacts = response.json()['artifacts']
response = requests.get(artifacts_url, headers=headers, timeout=10)
artifacts = response.json()["artifacts"]

if not artifacts:
logging.error("No artifacts found.")
@@ -116,22 +134,26 @@ def get_artifacts_of_the_last_completed_run(headers, branch, run_id):


def get_coverage_artifact_for_key(artifacts, key):
matching_artifacts = list(artifact for artifact in artifacts if artifact['name'] == key)
matching_artifacts = [artifact for artifact in artifacts if artifact["name"] == key]

if len(matching_artifacts) == 1:
return matching_artifacts[0]['archive_download_url']
return matching_artifacts[0]["archive_download_url"]
else:
logging.error("Expected exactly one artifact, found {} for key: {}".format(len(matching_artifacts), key))
logging.error(
"Expected exactly one artifact, found {} for key: {}".format(
len(matching_artifacts), key
)
)
return None


def get_branch_name():
repo = git.Repo(repo_path)
def get_branch_name(repo_path: str) -> str:
repo = git.Repo(repo_path, search_parent_directories=True)
return repo.active_branch.name


def get_changed_files(branch_name, base_branch_name="origin/develop"):
repo = git.Repo(repo_path)
def get_changed_files(repo_path, branch_name, base_branch_name="origin/develop"):
repo = git.Repo(repo_path, search_parent_directories=True)
try:
branch_head = repo.commit(branch_name)
except Exception as e:
@@ -151,11 +173,11 @@ def get_changed_files(branch_name, base_branch_name="origin/develop"):
for diff in diff_index:
file_path = diff.a_path if diff.a_path else diff.b_path
file_changes[file_path] = {
'A': 'added',
'D': 'deleted',
'M': 'modified',
'R': 'renamed'
}.get(diff.change_type, 'unknown')
"A": "added",
"D": "deleted",
"M": "modified",
"R": "renamed",
}.get(diff.change_type, "unknown")

return file_changes

@@ -165,14 +187,18 @@ def filter_file_changes(file_changes):
server_file_changes = {}

for file_name, change_type in file_changes.items():
if file_name.startswith("src/main/webapp/app"):
if file_name.endswith(".ts") and not file_name.endswith("module.ts"):
client_file_changes[file_name[len("src/main/webapp/"):]] = change_type
continue
elif file_name.startswith("src/main/java/de/tum/in/www1/artemis"):
if file_name.endswith(".java"):
server_file_changes[file_name[len("src/main/java/"):]] = change_type
continue
if (
file_name.startswith("src/main/webapp/app")
and file_name.endswith(".ts")
and not file_name.endswith("module.ts")
):
client_file_changes[file_name[len("src/main/webapp/") :]] = change_type
continue
elif file_name.startswith(
"src/main/java/de/tum/in/www1/artemis"
) and file_name.endswith(".java"):
server_file_changes[file_name[len("src/main/java/") :]] = change_type
continue
logging.debug(f"Skipping {file_name}")
return client_file_changes, server_file_changes

@@ -248,64 +274,109 @@ def main(argv):
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--token", help="GitHub Token with \"Public Repository Access\" permissions (optional, will be prompted if not provided)",
**environ_or_required("TOKEN", required=False)
"--token",
help='GitHub Token with "Public Repository Access" permissions (optional, will be prompted if not provided)',
**environ_or_required("TOKEN", required=False),
)
parser.add_argument(
"--branch-name", default=None,
help="Name of the Git branch you want to compare with the base branch (default: origin/develop)"
"--branch-name",
default=None,
help="Name of the Git branch you want to compare with the base branch (default: origin/develop)",
)
parser.add_argument(
"--base-branch-name", default="origin/develop", help="Name of the Git base branch (default: origin/develop)"
"--base-branch-name",
default="origin/develop",
help="Name of the Git base branch (default: origin/develop)",
)
parser.add_argument(
"--build-id", default=None, help="Build ID of the GitHub run id"
)
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
parser.add_argument(
"--verbose", action="store_true", help="Enable verbose logging"
"--print-results",
action="store_true",
help="Print the report to console instead of copying to clipboard",
)
parser.add_argument(
"--print-results", action="store_true",
help="Print the report to console instead of copying to clipboard"
"--repo-path",
default="./",
help="Path to the repository (default: current working directory)",
)

args = parser.parse_args(argv)
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if args.token is None:
args.token = getpass.getpass("Please enter your GitHub token: ")
if args.branch_name is None:
args.branch_name = get_branch_name()
logging.info(f"Using current branch: {args.branch_name}")
if args.base_branch_name is None:
args.base_branch_name = 'origin/develop'
logging.info(f"Base branch set to: {args.base_branch_name}")
args.base_branch_name = "origin/develop"
logging.info("Base branch set to: %s", args.base_branch_name)
if args.build_id is None:
logging.info("Using latest build ID")
try:
if args.branch_name is None:
args.branch_name = get_branch_name(args.repo_path)
file_changes = get_changed_files(
args.repo_path, args.branch_name, args.base_branch_name
)
except git.exc.InvalidGitRepositoryError:
logging.exception("%s is not part of a valid Git repository.", args.repo_path)
sys.exit(1)

file_changes = get_changed_files(args.branch_name, args.base_branch_name)
client_file_changes, server_file_changes = filter_file_changes(file_changes)

if args.token is None:
args.token = getpass.getpass("Please enter your GitHub token: ")
logging.info("Using current branch: %s", args.branch_name)

headers = {
'Authorization': f'token {args.token}',
'Accept': 'application/vnd.github.v3+json'
"Authorization": f"token {args.token}",
"Accept": "application/vnd.github.v3+json",
}

artifacts = get_artifacts_of_the_last_completed_run(headers, args.branch_name, args.build_id)
artifacts = get_artifacts_of_the_last_completed_run(
headers, args.branch_name, args.build_id
)

client_coverage_zip_bytes = download_and_extract_zip(get_coverage_artifact_for_key(artifacts, client_tests_key), headers, client_tests_key)
server_coverage_zip_bytes = download_and_extract_zip(get_coverage_artifact_for_key(artifacts, server_tests_key), headers, server_tests_key)
client_coverage_zip_bytes = download_and_extract_zip(
get_coverage_artifact_for_key(artifacts, client_tests_key),
headers,
client_tests_key,
)
server_coverage_zip_bytes = download_and_extract_zip(
get_coverage_artifact_for_key(artifacts, server_tests_key),
headers,
server_tests_key,
)

with logging_redirect_tqdm():
client_cov = [
get_client_line_coverage(client_coverage_zip_bytes, file_name, change_type)
for file_name, change_type in tqdm(client_file_changes.items(), desc="Building client coverage", unit="files")
] if client_coverage_zip_bytes is not None else None

server_cov = [
get_server_line_coverage(server_coverage_zip_bytes, file_name, change_type)
for file_name, change_type in tqdm(server_file_changes.items(), desc="Building server coverage", unit="files")
] if server_coverage_zip_bytes is not None else None
client_cov = (
[
get_client_line_coverage(
client_coverage_zip_bytes, file_name, change_type
)
for file_name, change_type in tqdm(
client_file_changes.items(),
desc="Building client coverage",
unit="files",
)
]
if client_coverage_zip_bytes is not None
else None
)

server_cov = (
[
get_server_line_coverage(
server_coverage_zip_bytes, file_name, change_type
)
for file_name, change_type in tqdm(
server_file_changes.items(),
desc="Building server coverage",
unit="files",
)
]
if server_coverage_zip_bytes is not None
else None
)

client_table = coverage_to_table(client_cov)
server_table = coverage_to_table(server_cov)
@@ -316,15 +387,26 @@ def main(argv):
if server_file_changes:
result += f"#### Server\n\n{server_table}\n\n"

logging.info("Info: ✅ ❌ in Confirmation (assert/expect) have to be adjusted manually, also delete trivial files!")
logging.info(
"Info: ✅ ❌ in Confirmation (assert/expect) have to be adjusted manually, also delete trivial files!"
)
logging.info("") # newline

if args.print_results:
print(result)
else:
result_utf16 = result.encode('utf-16le') + b'\x00\x00'
pyperclip.copy(result_utf16.decode('utf-16le'))
logging.info("Code coverage report copied to clipboard. Use --print-results to print it to console instead.")
try:
result_utf16 = result.encode("utf-16le") + b"\x00\x00"
pyperclip.copy(result_utf16.decode("utf-16le"))
logging.info(
"Code coverage report copied to clipboard. Use --print-results to print it to console instead."
)
except pyperclip.PyperclipException as e:
logging.exception(
"Failed to copy the code coverage report to clipboard:\n%s\n\nPrinted results instead. Use --print-results to directly print it to console.",
e,
)
print(result)


if __name__ == "__main__":