From 3ca65b61759a0544b806b2d8e9fcf79b604f30f5 Mon Sep 17 00:00:00 2001
From: Grzegorz Bokota
Date: Tue, 23 Jul 2024 02:00:12 +0200
Subject: [PATCH] Update release scripts for 0.5.0 release (#5)

* update find_contributors script to better handle multiple repository configuration
* move GH_TOKEN related exception to `get_github`
* improve adding additional repos to find contributors without citation
* add warning about additional_notes
* add option to add PRs from docs
* Add header and highlights paragraphs
* don't use full repo specifier
* Merge main, docs, and first time contributors
* Allow using header snippet
* uncomment main repo crawling
* Fix typo in commit link f-string
* Remove warning and add date
* Update highlights section
* Run black
* Use ruff-fmt instead of black
* fix format and ignore E501

---------

Co-authored-by: Juan Nunez-Iglesias
---
 .pre-commit-config.yaml               |   9 +-
 add_login_to_citation_cff.py          |  42 +++--
 additional_notes/0.5.0/header.md      |  10 ++
 additional_notes/0.5.0/highlights.md  |  49 +++++
 cherry_pick_process.py                |  62 ++++---
 docs_cherry_pick.py                   |  40 +++--
 filter_opened_bug_issues.py           |  56 +++---
 filter_pr_that_may_be_selected.py     |  53 +++---
 find_all_undeleted_branches.py        |  18 +-
 find_contributors_without_citation.py |  79 +++++---
 find_pre_commit_updates.py            |  13 +-
 generate_release_notes.py             | 247 +++++++++++++++-----------
 list_opened_pr.py                     |   8 +-
 pyproject.toml                        |  12 +-
 release_utils.py                      | 107 ++++++-----
 sort_citation_cff.py                  |  15 +-
 16 files changed, 497 insertions(+), 323 deletions(-)
 create mode 100644 additional_notes/0.5.0/header.md
 create mode 100644 additional_notes/0.5.0/highlights.md

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 074a8d3..46609e2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,15 +8,10 @@ repos:
   - id: end-of-file-fixer
     exclude: patch_dir
   - id: check-yaml
-- repo: https://github.com/psf/black-pre-commit-mirror
-  rev: 23.10.1
-  hooks:
-  - id: black
-    pass_filenames: true
-    exclude: _vendor|vendored|examples
 - repo: https://github.com/charliermarsh/ruff-pre-commit
-  rev: v0.1.3
+  rev: v0.4.8
   hooks:
   - id: ruff
     exclude: _vendor|vendored
     args: [--output-format, github]
+  - id: ruff-format
diff --git a/add_login_to_citation_cff.py b/add_login_to_citation_cff.py
index 9b302b3..1cc43c3 100644
--- a/add_login_to_citation_cff.py
+++ b/add_login_to_citation_cff.py
@@ -2,6 +2,7 @@
 This is a script for adding logins to the existing CITATION.cff file.
 This simplifies future updating of this file.
 It creates a backup file with a .bck suffix.
 """
+
 from __future__ import annotations

 import argparse
@@ -23,7 +24,7 @@
 )

 LOCAL_DIR = Path(__file__).parent
-DEFAULT_CORRECTION_FILE = LOCAL_DIR / "name_corrections.yaml"
+DEFAULT_CORRECTION_FILE = LOCAL_DIR / 'name_corrections.yaml'


 def get_name(user, correction_dict):
@@ -36,10 +37,12 @@ def get_name(user, correction_dict):

 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument("path", help="The path to the citation file to sort", type=Path)
     parser.add_argument(
-        "--correction-file",
-        help="The file with the corrections",
+        'path', help='The path to the citation file to sort', type=Path
+    )
+    parser.add_argument(
+        '--correction-file',
+        help='The file with the corrections',
         default=DEFAULT_CORRECTION_FILE,
         type=existing_file,
     )
@@ -51,7 +54,7 @@ def main():

 def add_logins(cff_path: Path, correction_file: Path | None = None) -> None:
     setup_cache()
-    with cff_path.open(encoding="utf8") as f:
+    with cff_path.open(encoding='utf8') as f:
         data = safe_load(f)

     contributors_iterable = get_repo().get_contributors()
@@ -62,30 +65,37 @@ def add_logins(cff_path: Path, correction_file: Path | None = None) -> None:

     contributors = {
         get_name(user, correction_dict): user
-        for user in tqdm(contributors_iterable, total=contributors_iterable.totalCount)
+        for user in tqdm(
+            contributors_iterable, total=contributors_iterable.totalCount
+        )
         if get_name(user, correction_dict) is not None
     }
     for user in get_repo().get_contributors():
-        if get_name(user, correction_dict) is None and user.login not in BOT_LIST:
-            print(f"Could not find {user.login}", file=sys.stderr)
+        if (
+            get_name(user, correction_dict) is None
+            and user.login not in BOT_LIST
+        ):
+            print(f'Could not find {user.login}', file=sys.stderr)

     # assert len(contributors) == contributors_iterable.totalCount

-    for i, author in enumerate(data["authors"]):
-        if "alias" in author:
+    for i, author in enumerate(data['authors']):
+        if 'alias' in author:
             continue
-        name = unidecode(f'{author["given-names"]} {author["family-names"]}'.lower())
+        name = unidecode(
+            f'{author["given-names"]} {author["family-names"]}'.lower()
+        )
         if name in contributors:
-            author["alias"] = contributors[name].login
+            author['alias'] = contributors[name].login
         else:
-            print(f"Could not find {name}", file=sys.stderr)
+            print(f'Could not find {name}', file=sys.stderr)

-    shutil.copy(str(cff_path), f"{cff_path}.bck")
+    shutil.copy(str(cff_path), f'{cff_path}.bck')

-    with cff_path.open("w", encoding="utf8") as f:
+    with cff_path.open('w', encoding='utf8') as f:
         safe_dump(data, f, sort_keys=False, allow_unicode=True)


-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
diff --git a/additional_notes/0.5.0/header.md b/additional_notes/0.5.0/header.md
new file mode 100644
index 0000000..e649a41
--- /dev/null
+++ b/additional_notes/0.5.0/header.md
@@ -0,0 +1,10 @@
+*Thursday, Jul 11, 2024*
+
+We're happy to announce the release of napari 0.5.0!
+napari is a fast, interactive, multi-dimensional image viewer for Python.
+It's designed for exploring, annotating, and analyzing multi-dimensional
+images. It's built on Qt (for the GUI), VisPy (for performant GPU-based
+rendering), and the scientific Python stack (NumPy, SciPy, and friends).
+
+For more information, examples, and documentation, please visit our website:
+https://napari.org/
diff --git a/additional_notes/0.5.0/highlights.md b/additional_notes/0.5.0/highlights.md
new file mode 100644
index 0000000..b75585a
--- /dev/null
+++ b/additional_notes/0.5.0/highlights.md
@@ -0,0 +1,49 @@
+napari 0.5.0 is the beginning of an architectural overhaul of napari. The
+architecture improvements, which are still ongoing, enable more responsive
+asynchronous loading when slicing layers or panning and zooming in multiscale
+2D layers ([#5816](https://github.com/napari/napari/pull/5816)). There are
+several performance improvements, too, including faster points layer creation
+and updates ([#6727](https://github.com/napari/napari/pull/6727)).
+
+Other architectural changes, refactoring napari on top of
+[app-model](https://app-model.readthedocs.io/en/latest/), have enabled us to
+(finally 😅) implement [NAP-6](nap-6-contributable-menus), which allows
+plugins to organize their commands in defined menus in the napari menubar
+and application. Please read [NAP-6](nap-6-contributable-menus) for all the
+juicy details, including how to request more menus if the existing ones don't
+meet your needs. 📋 ([#7011](https://github.com/napari/napari/pull/7011))
+
+Another important development for plugins is that we have added fields for
+axis names and physical units in layers
+([#6979](https://github.com/napari/napari/pull/6979)). If you implement a
+reader plugin, you can now specify the names of the axes in the data that you
+are reading in, and the physical units of the scale and other transformations.
+Currently, napari is *not* using this information, but we will in upcoming
+versions, so plugins should start providing this information if they have it.
+
+There are plenty of new features, too, including a polygon drawing tool when
+painting labels ([#5806](https://github.com/napari/napari/pull/5806)),
+pinch-to-zoom ([#5859](https://github.com/napari/napari/pull/5859)), better
+ways to show/hide individual layers when exploring your data
+([#5574](https://github.com/napari/napari/pull/5574))
+([#5618](https://github.com/napari/napari/pull/5618)), creating a layer from
+an image or URL in your clipboard
+([#6532](https://github.com/napari/napari/pull/6532)),
+a new way to export figure-quality renderings from the canvas
+([#6730](https://github.com/napari/napari/pull/6730)) (2D-only for now),
+and the ability to copy and paste spatial metadata (scale, translate, etc.)
+between layers ([#6864](https://github.com/napari/napari/pull/6864)).
+
+You'll also note a new little button on layer controls, including images:
+
+```{image} ../images/transform-icon.svg
+:alt: transform layer icon
+:width: 100px
+:align: center
+```
+
+This little button allows you to resize and rotate layers, enabling manual
+alignment ([#6794](https://github.com/napari/napari/pull/6794))!
+
+All in all, this release has over 20 new features and over 100 bug fixes and
+improvements. Please see below for the full list of changes since 0.4.19.
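As a quick orientation for readers of the release notes above who haven't used napari: a minimal viewing session with its public Python API looks roughly like the sketch below. The synthetic volume and layer name are illustrative assumptions, not part of this patch.

```python
import numpy as np

import napari

# Illustrative stand-in data: a small synthetic 3D stack (not from the patch).
data = np.random.random((16, 256, 256))

viewer = napari.Viewer()  # open the Qt-based viewer window
viewer.add_image(data, name='random stack')  # add a multi-dimensional image layer
napari.run()  # start the GUI event loop
```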
diff --git a/cherry_pick_process.py b/cherry_pick_process.py
index 4b574ce..a196411 100644
--- a/cherry_pick_process.py
+++ b/cherry_pick_process.py
@@ -2,6 +2,7 @@
 """
 This is a script to cherry-pick commits based on PR labels
 """
+
 from __future__ import annotations

 import argparse
@@ -28,32 +29,38 @@

 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument("base_branch", help="The base branch.")
-    parser.add_argument("milestone", help="The milestone to list")
+    parser.add_argument('base_branch', help='The base branch.')
+    parser.add_argument('milestone', help='The milestone to list')
+    parser.add_argument(
+        '--first-commits',
+        help='file with list of first commits to cherry pick',
+    )
     parser.add_argument(
-        "--first-commits", help="file with list of first commits to cherry pick"
+        '--stop-after', help='Stop after this PR', default=0, type=int
     )
-    parser.add_argument("--stop-after", help="Stop after this PR", default=0, type=int)
     parser.add_argument(
-        "--git-main-branch",
-        help="The git main branch",
-        default=os.environ.get("GIT_RELEASE_MAIN_BRANCH", "main"),
+        '--git-main-branch',
+        help='The git main branch',
+        default=os.environ.get('GIT_RELEASE_MAIN_BRANCH', 'main'),
     )
     parser.add_argument(
-        "--working-dir", help="path to repository", default=LOCAL_DIR, type=Path
+        '--working-dir',
+        help='path to repository',
+        default=LOCAL_DIR,
+        type=Path,
     )
     parser.add_argument(
-        "--skip-commits",
-        nargs="+",
-        help="list of commits to skip as they are already cherry-picked",
+        '--skip-commits',
+        nargs='+',
+        help='list of commits to skip as they are already cherry-picked',
         type=int,
     )

     argcomplete.autocomplete(parser)
     args = parser.parse_args()

-    target_branch = f"v{args.milestone}x"
+    target_branch = f'v{args.milestone}x'

     if args.first_commits is not None:
         with open(args.first_commits) as f:
@@ -74,18 +81,23 @@ def main():


 def prepare_repo(
-    working_dir: Path, target_branch: str, base_branch: str, main_branch: str = "main"
+    working_dir: Path,
+    target_branch: str,
+    base_branch: str,
+    main_branch: str = 'main',
 ) -> Repo:
     if not working_dir.exists():
-        repo = Repo.clone_from(f"git@{GH}:{GH_USER}/{GH_REPO}.git", working_dir)
+        repo = Repo.clone_from(
+            f'git@{GH}:{GH_USER}/{GH_REPO}.git', working_dir
+        )
     else:
         repo = Repo(LOCAL_DIR / REPO_DIR_NAME)
     if target_branch not in repo.branches:
         repo.git.checkout(base_branch)
-        repo.git.checkout("HEAD", b=target_branch)
+        repo.git.checkout('HEAD', b=target_branch)
     else:
-        repo.git.reset("--hard", "HEAD")
+        repo.git.reset('--hard', 'HEAD')
         repo.git.checkout(main_branch)
         repo.git.pull()
         repo.git.checkout(target_branch)
@@ -101,7 +113,7 @@ def perform_cherry_pick(
     first_commits: set,
     stop_after: int | None,
     base_branch: str,
-    main_branch: str = "main",
+    main_branch: str = 'main',
     skip_commits: list[int] | None = None,
 ):
     """
@@ -141,13 +153,13 @@ def perform_cherry_pick(
     setup_cache()
     milestone = get_milestone(milestone_str)

-    patch_dir_path = working_dir / "patch_dir" / milestone.title
+    patch_dir_path = working_dir / 'patch_dir' / milestone.title
     patch_dir_path.mkdir(parents=True, exist_ok=True)

     # with short_cache(60):
     pr_targeted_for_release = [
         x
-        for x in iter_pull_request(f"milestone:{milestone.title} is:merged")
+        for x in iter_pull_request(f'milestone:{milestone.title} is:merged')
         if x.milestone == milestone
     ]

@@ -187,9 +199,9 @@ def perform_cherry_pick(
             # commit = repo.commit(pr_commits_dict[pull.number])
             # print("hash", pr_commits_dict[pull.number])
             # break
-        patch_file = patch_dir_path / f"{pull.number}.patch"
+        patch_file =
patch_dir_path / f'{pull.number}.patch' if patch_file.exists(): - print(f"Apply patch {patch_file}") + print(f'Apply patch {patch_file}') repo.git.am(str(patch_file)) continue try: @@ -197,10 +209,10 @@ def perform_cherry_pick( except GitCommandError: print(pull, pr_commits_dict[pull.number]) repo.git.mergetool() - repo.git.cherry_pick("--continue") - with open(patch_file, "w") as f: - f.write(repo.git.format_patch("HEAD~1", "--stdout")) + repo.git.cherry_pick('--continue') + with open(patch_file, 'w') as f: + f.write(repo.git.format_patch('HEAD~1', '--stdout')) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/docs_cherry_pick.py b/docs_cherry_pick.py index c6ebfd4..533e6dd 100644 --- a/docs_cherry_pick.py +++ b/docs_cherry_pick.py @@ -19,32 +19,36 @@ ) parser = argparse.ArgumentParser() -parser.add_argument("milestone", help="The milestone to list") +parser.add_argument('milestone', help='The milestone to list') parser.add_argument( - "--first-commits", help="file with list of first commits to cherry pick" + '--first-commits', help='file with list of first commits to cherry pick' ) -parser.add_argument("--stop-after", help="Stop after this commit", default=0, type=int) parser.add_argument( - "--git-repository", - help="The git repository", + '--stop-after', help='Stop after this commit', default=0, type=int +) +parser.add_argument( + '--git-repository', + help='The git repository', default=os.environ.get( - "GIT_RELEASE_REPOSITORY", "git@github.com:napari/napari.git" + 'GIT_RELEASE_REPOSITORY', 'git@github.com:napari/napari.git' ), ) parser.add_argument( - "--git-main-branch", - help="The git main branch", - default=os.environ.get("GIT_RELEASE_MAIN_BRANCH", "main"), + '--git-main-branch', + help='The git main branch', + default=os.environ.get('GIT_RELEASE_MAIN_BRANCH', 'main'), ) def get_consumed_pr(): res = set() - base = repo.merge_base(f"docs_{milestone.title}", f"v{milestone.title}x") + base = repo.merge_base(f'docs_{milestone.title}', f'v{milestone.title}x') - for commit in repo.iter_commits(f"{base[0].binsha.hex()}..docs_{milestone.title}"): + for commit in repo.iter_commits( + f'{base[0].binsha.hex()}..docs_{milestone.title}' + ): if (match := PR_NUM_PATTERN.search(commit.message)) is not None: pr_num = int(match[1]) res.add(pr_num) @@ -59,21 +63,21 @@ def get_consumed_pr(): milestone = get_milestone(args.milestone) -if not (LOCAL_DIR / "patch_dir").exists(): - (LOCAL_DIR / "patch_dir").mkdir() +if not (LOCAL_DIR / 'patch_dir').exists(): + (LOCAL_DIR / 'patch_dir').mkdir() -patch_dir_path = LOCAL_DIR / "patch_dir" / f"docs_{milestone.title}" +patch_dir_path = LOCAL_DIR / 'patch_dir' / f'docs_{milestone.title}' if not patch_dir_path.exists(): patch_dir_path.mkdir() repo = Repo(LOCAL_DIR / REPO_DIR_NAME) -repo.git.checkout(f"docs_{milestone.title}") +repo.git.checkout(f'docs_{milestone.title}') pr_list_base = sorted( - iter_pull_request(f"milestone:{args.milestone} is:merged", repo="docs"), + iter_pull_request(f'milestone:{args.milestone} is:merged', repo='docs'), key=lambda x: x.closed_at, ) @@ -81,14 +85,14 @@ def get_consumed_pr(): for pull in tqdm(pr_list_base): - patch_file = patch_dir_path / f"{pull.number}.patch" + patch_file = patch_dir_path / f'{pull.number}.patch' if pull.number in skip_pr: continue print(pull.number, pull.title) if not patch_file.exists(): urllib.request.urlretrieve( - f"https://github.com/napari/docs/commit/{pull.merge_commit_sha}.patch", + f'https://github.com/napari/docs/commit/{pull.merge_commit_sha}.patch', patch_file, ) 
    repo.git.am(str(patch_file))
diff --git a/filter_opened_bug_issues.py b/filter_opened_bug_issues.py
index 060e9fc..e804a38 100644
--- a/filter_opened_bug_issues.py
+++ b/filter_opened_bug_issues.py
@@ -13,59 +13,61 @@
 )

 parser = argparse.ArgumentParser(usage=__doc__)
-parser.add_argument("from_commit", help="The starting tag.")
-parser.add_argument("to_commit", help="The head branch.")
+parser.add_argument('from_commit', help='The starting tag.')
+parser.add_argument('to_commit', help='The head branch.')
 parser.add_argument(
-    "--milestone",
-    help="if present then filter issues with a given milestone",
+    '--milestone',
+    help='if present then filter issues with a given milestone',
    default=None,
    type=str,
 )
 parser.add_argument(
-    "--skip-triaged",
-    action="store_true",
+    '--skip-triaged',
+    action='store_true',
     default=False,
-    help="if present then skip triaged PRs",
+    help='if present then skip triaged issues',
 )
-parser.add_argument("--label", help="The label", action="append")
+parser.add_argument('--label', help='The label', action='append')

 args = parser.parse_args()

 if args.label is None:
-    args.label = ["bug"]
+    args.label = ['bug']

 setup_cache()
 repository = get_repo()

 if args.milestone is not None:
-    if args.milestone.lower() == "none":
-        milestone_search_string = "no:milestone"
+    if args.milestone.lower() == 'none':
+        milestone_search_string = 'no:milestone'
         milestone = None
     else:
         milestone = get_milestone(args.milestone)
         milestone_search_string = f'milestone:"{milestone.title}"'
 else:
-    milestone_search_string = ""
+    milestone_search_string = ''
     milestone = None

 previous_tag_date = get_split_date(args.from_commit, args.to_commit)

-probably_solved = repository.get_label("probably solved")
-need_to_reproduce = repository.get_label("need to reproduce")
+probably_solved = repository.get_label('probably solved')
+need_to_reproduce = repository.get_label('need to reproduce')

 if args.skip_triaged:
-    triage_labels = [x for x in repository.get_labels() if x.name.startswith("triaged")]
+    triage_labels = [
+        x for x in repository.get_labels() if x.name.startswith('triaged')
+    ]
 else:
     triage_labels = []

 labels = [repository.get_label(label) for label in args.label]

 search_string = (
-    f"repo:{GH_USER}/{GH_REPO} is:issue is:open "
-    f"created:>{previous_tag_date.isoformat()} "
-    "sort:updated-desc " + milestone_search_string
+    f'repo:{GH_USER}/{GH_REPO} is:issue is:open '
+    f'created:>{previous_tag_date.isoformat()} '
+    'sort:updated-desc ' + milestone_search_string
 )

 # print(search_string, file=sys.stderr)
@@ -78,10 +80,10 @@

 for issue in tqdm(
     iterable,
-    desc="issues...",
+    desc='issues...',
     total=iterable.totalCount,
 ):
-    if "[test-bot]" in issue.title:
+    if '[test-bot]' in issue.title:
         continue
     if probably_solved in issue.labels:
         continue
@@ -93,20 +95,20 @@
     issue_list.append(issue)

 if len(labels) > 1:
-    label_string = "labels " + ", ".join([x.name for x in labels])
+    label_string = 'labels ' + ', '.join([x.name for x in labels])
 else:
-    label_string = f"label {labels[0].name}"
+    label_string = f'label {labels[0].name}'

-header = f"## {len(issue_list)} Opened Issues with {label_string}"
+header = f'## {len(issue_list)} Opened Issues with {label_string}'
 if milestone:
-    if milestone_search_string.startswith("no:"):
-        header += " and no milestone"
+    if milestone_search_string.startswith('no:'):
+        header += ' and no milestone'
     else:
-        header += f" and milestone {milestone.title}"
+        header += f' and milestone {milestone.title}'

 print(header)

 for issue in issue_list:
-    print(f" * [ ] #{issue.number}")
+    print(f' * [ ] #{issue.number}')
diff --git a/filter_pr_that_may_be_selected.py b/filter_pr_that_may_be_selected.py
index 6545c16..78b9b04 100644
--- a/filter_pr_that_may_be_selected.py
+++ b/filter_pr_that_may_be_selected.py
@@ -2,6 +2,7 @@
 This is a supporting script for generating a list of PRs that may be
 cherry-picked for a bugfix release.
 """
+
 from __future__ import annotations

 import argparse
@@ -21,27 +22,29 @@

 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument("from_commit", help="The starting tag.")
-    parser.add_argument("to_commit", help="The head branch.")
+    parser.add_argument('from_commit', help='The starting tag.')
+    parser.add_argument('to_commit', help='The head branch.')
     parser.add_argument(
-        "--milestone",
-        help="if present then filter PR with a given milestone",
+        '--milestone',
+        help='if present then filter PR with a given milestone',
         default=None,
         type=str,
     )
     parser.add_argument(
-        "--label",
-        help="if present then filter PR with a given label",
+        '--label',
+        help='if present then filter PR with a given label',
         default=None,
         type=str,
     )
     parser.add_argument(
-        "--skip-triaged",
-        action="store_true",
+        '--skip-triaged',
+        action='store_true',
         default=False,
-        help="if present then skip triaged PRs",
+        help='if present then skip triaged PRs',
+    )
+    parser.add_argument(
+        '--target-branch', help='The target branch', default=''
     )
-    parser.add_argument("--target-branch", help="The target branch", default="")

     args = parser.parse_args()

@@ -72,23 +75,25 @@ def filter_pr(
     label = repository.get_label(label) if label else None

     repo = get_local_repo(REPO_DIR_NAME)
-    consumed_pr = get_consumed_pr(repo, target_branch) if target_branch else set()
+    consumed_pr = (
+        get_consumed_pr(repo, target_branch) if target_branch else set()
+    )

     if skip_triaged:
         triage_labels = [
-            x for x in repository.get_labels() if x.name.startswith("triaged")
+            x for x in repository.get_labels() if x.name.startswith('triaged')
         ]
     else:
         triage_labels = []

     if milestone is not None:
-        query = f"milestone:{milestone.title} is:merged "
+        query = f'milestone:{milestone.title} is:merged '
     else:
         previous_tag_date = get_split_date(from_commit, to_commit)
-        query = f"merged:>{previous_tag_date.isoformat()} no:milestone "
+        query = f'merged:>{previous_tag_date.isoformat()} no:milestone '

     if label is not None:
-        query += f" label:{label.name} "
+        query += f' label:{label.name} '

     with short_cache(60):
         iterable = iter_pull_request(query)
@@ -104,24 +109,26 @@ def filter_pr(

     if not pr_to_list:
         text = (
-            f"## No PRs found with milestone {milestone.title}"
+            f'## No PRs found with milestone {milestone.title}'
             if milestone
-            else "## No PRs found without milestone"
+            else '## No PRs found without milestone'
         )
     elif milestone:
-        text = f"## {len(pr_to_list)} PRs with milestone {milestone.title}"
+        text = f'## {len(pr_to_list)} PRs with milestone {milestone.title}'
     else:
-        text = f"## {len(pr_to_list)} PRs without milestone"
+        text = f'## {len(pr_to_list)} PRs without milestone'

     if label:
-        text += f" and label {label.name}"
+        text += f' and label {label.name}'

-    text += ":"
+    text += ':'
     print(text)

     for pull in sorted(pr_to_list, key=lambda x: x.closed_at):
-        print(f' * [{"x" if pull.number in consumed_pr else " "}] #{pull.number}')
+        print(
+            f' * [{"x" if pull.number in consumed_pr else " "}] #{pull.number}'
+        )


-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
diff --git a/find_all_undeleted_branches.py b/find_all_undeleted_branches.py
index ed409a2..f435d35 100644
--- a/find_all_undeleted_branches.py
+++ b/find_all_undeleted_branches.py
@@ -7,7 +7,7 @@
 from release_utils import GH_REPO, GH_USER, get_github, setup_cache

 parser = argparse.ArgumentParser(usage=__doc__)
-parser.add_argument("user_name", help="name to search fr undeleted branches")
+parser.add_argument('user_name', help='name to search for undeleted branches')
 args = parser.parse_args()

 setup_cache()
@@ -15,11 +15,11 @@

 user = get_github().get_user(args.user_name)

 pull_requests = get_github().search_issues(
-    f"repo:{GH_USER}/{GH_REPO} "
-    "is:closed "
-    "is:pr "
-    f"author:{user.login} "
-    "sort:created-asc"
+    f'repo:{GH_USER}/{GH_REPO} '
+    'is:closed '
+    'is:pr '
+    f'author:{user.login} '
+    'sort:created-asc'
 )

 to_remove_branches = []
@@ -31,9 +31,9 @@
     to_remove_branches.append(pull)

 if not to_remove_branches:
-    print("No undeleted branches found")
+    print('No undeleted branches found')
     exit(0)

-print(f"Found {len(to_remove_branches)} undeleted branches")
+print(f'Found {len(to_remove_branches)} undeleted branches')
 for pull in to_remove_branches:
-    print(f" {pull.title} {pull.html_url}")
+    print(f' {pull.title} {pull.html_url}')
diff --git a/find_contributors_without_citation.py b/find_contributors_without_citation.py
index afff7d9..9f83401 100644
--- a/find_contributors_without_citation.py
+++ b/find_contributors_without_citation.py
@@ -11,6 +11,7 @@
 """

 import argparse
+import os

 from tqdm import tqdm
 from yaml import safe_load
@@ -29,59 +30,74 @@

 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument("--milestone", help="The milestone to check")
     parser.add_argument(
-        "--correction-file",
-        help="The file with the corrections",
-        default=LOCAL_DIR / "name_corrections.yaml",
+        '--milestone', help='The milestone to check', default=''
     )
+    parser.add_argument('--repo', help='The repo to check', action='append')
+    # parser.add_argument(
+    #     "--additional-repo", help="The additional repo to check", action="append"
+    # )
     parser.add_argument(
-        "--citation-path",
-        help="",
-        default=str(REPO_DIR_NAME / "CITATION.cff"),
+        '--correction-file',
+        help='The file with the corrections',
+        default=LOCAL_DIR / 'name_corrections.yaml',
+    )
+    parser.add_argument(
+        '--citation-path',
+        help='The path to the CITATION.cff file',
+        default=str(os.path.join(REPO_DIR_NAME, 'CITATION.cff')),
         type=existing_file,
     )
     parser.add_argument(
-        "--generate",
-        help="Generate the missing entries based on github data",
-        action="store_true",
+        '--generate',
+        help='Generate the missing entries based on github data',
+        action='store_true',
     )

     args = parser.parse_args()

+    if args.repo is None:
+        args.repo = ['napari/napari', 'napari/docs']
+
     with args.citation_path.open() as f:
         citation = safe_load(f)

-    if args.milestone is not None:
-        missing_authors = find_missing_authors_for_milestone(citation, args.milestone)
-    else:
-        missing_authors = find_missing_authors(citation)
+    missing_authors = set()
+
+    for repo in args.repo:
+        missing_authors |= find_missing_authors_for_milestone(
+            citation, repo, args.milestone
+        )

     if args.generate:
         for login, name in sorted(missing_authors):
             if name is None:
                 continue
             name, surname = name.rsplit(' ', 1)
             print(
                 f'- given-names: {name}\n  family-names: {surname}\n  alias: {login}'
             )
     else:
         for login, name in sorted(missing_authors):
             print(f'@{login} ', end='')  # ({name})
     print()


-def find_missing_authors(citation) -> set[tuple[str, str]]:
+def find_missing_authors(citation, repository: str) -> set[tuple[str, str]]:
     author_dict = {}
-    for author in citation["authors"]:
-        author_dict[author["alias"]] = author
+    for author in citation['authors']:
+        author_dict[author['alias']] = author

     setup_cache()
     missing_authors = set()

-    contributors = get_repo().get_contributors()
+    contributors = get_repo(*repository.split('/')).get_contributors()

-    for creator in tqdm(contributors, total=contributors.totalCount):
+    for creator in tqdm(
+        contributors,
+        total=contributors.totalCount,
+        desc=f'finding authors for {repository}',
+    ):
         if creator.login in BOT_LIST:
             continue
         if creator.login not in author_dict:
@@ -90,18 +106,25 @@ def find_missing_authors(citation) -> set[tuple[str, str]]:


 def find_missing_authors_for_milestone(
-    citation, milestone_str: str
+    citation, repository: str, milestone_str: str = ''
 ) -> set[tuple[str, str]]:
+    if not milestone_str:
+        return find_missing_authors(citation, repository)
+
     author_dict = {}
-    for author in citation["authors"]:
-        author_dict[author["alias"]] = author
+    for author in citation['authors']:
+        author_dict[author['alias']] = author

     setup_cache()

-    milestone = get_milestone(milestone_str)
+    milestone = get_milestone(milestone_str, repository)
     missing_authors = set()

-    for pull in iter_pull_request(f"milestone:{milestone.title} is:merged"):
+    user, repo = repository.split('/')
+
+    for pull in iter_pull_request(
+        f'milestone:{milestone.title} is:merged', user, repo
+    ):
         issue = pull.as_issue()
         creator = issue.user
         if creator.login in BOT_LIST:
@@ -111,5 +134,5 @@ def find_missing_authors_for_milestone(
     return missing_authors


-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
diff --git a/find_pre_commit_updates.py b/find_pre_commit_updates.py
index 42327a6..2c595eb 100644
--- a/find_pre_commit_updates.py
+++ b/find_pre_commit_updates.py
@@ -1,6 +1,7 @@
 """
 This script finds all pre-commit PRs that modify more than just the pre-commit config
 """
+
 import argparse

 from release_utils import (
@@ -10,8 +11,8 @@
 )

 parser = argparse.ArgumentParser()
-parser.add_argument("from_commit", help="The starting tag.")
-parser.add_argument("to_commit", help="The head branch.")
+parser.add_argument('from_commit', help='The starting tag.')
+parser.add_argument('to_commit', help='The head branch.')

 args = parser.parse_args()

@@ -22,8 +23,8 @@

 pr_to_list = []

-for pull in iter_pull_request(f"merged:>{previous_tag_date.isoformat()} "):
-    if "[pre-commit.ci]" in pull.title and pull.changed_files > 1:
+for pull in iter_pull_request(f'merged:>{previous_tag_date.isoformat()} '):
+    if '[pre-commit.ci]' in pull.title and pull.changed_files > 1:
         pr_to_list.append(pull)
     # find PR without milestone
     # if "[pre-commit.ci]" in pull.title and pull.milestone is None:
@@ -31,9 +32,9 @@


 if not pr_to_list:
-    print("No PRs found")
+    print('No PRs found')
     exit(0)

 for pull in sorted(pr_to_list, key=lambda x: x.closed_at):
-    print(f" * [ ] #{pull.number} {pull.html_url} {pull.milestone}")
+    print(f' * [ ] #{pull.number} {pull.html_url} {pull.milestone}')
diff --git a/generate_release_notes.py b/generate_release_notes.py
index 2b73c66..bc4af9d 100644
--- a/generate_release_notes.py
+++ b/generate_release_notes.py
@@ -1,4 +1,5 @@
 """Generate the release notes automatically from GitHub pull requests.
+ Start with: ``` export GH_TOKEN= @@ -24,10 +25,15 @@ https://github.com/scikit-image/scikit-image/issues/3404 https://github.com/scikit-image/scikit-image/issues/3405 """ + import argparse import re import sys from pathlib import Path +from typing import NamedTuple + +from github.PullRequest import PullRequest +from github.Repository import Repository from release_utils import ( BOT_LIST, @@ -45,21 +51,40 @@ LOCAL_DIR = Path(__file__).parent +PR_REGEXP = re.compile(r'(?P[\w-]+)/(?P[\w-]+)#(?P\d+)') + + +class PRInfo(NamedTuple): + user: str + repo: str + pr: int + + +def parse_pr_num(pr_num): + if match := PR_REGEXP.match(pr_num): + return PRInfo( + match.group('user'), match.group('repo'), int(match.group('pr')) + ) + try: + return int(pr_num) + except ValueError: + raise argparse.ArgumentTypeError(f'{pr_num} is not a valid PR number.') + parser = argparse.ArgumentParser(usage=__doc__) -parser.add_argument("milestone", help="The milestone to list") -parser.add_argument("--target-directory", type=Path, default=None) +parser.add_argument('milestone', help='The milestone to list') +parser.add_argument('--target-directory', type=Path, default=None) parser.add_argument( - "--correction-file", - help="The file with the corrections", - default=LOCAL_DIR / "name_corrections.yaml", + '--correction-file', + help='The file with the corrections', + default=LOCAL_DIR / 'name_corrections.yaml', ) parser.add_argument( - "--with-pr", - help="Include PR numbers for not merged PRs", - type=int, + '--with-pr', + help='Include PR numbers for not merged PRs', + type=parse_pr_num, default=None, - nargs="+", + nargs='+', ) args = parser.parse_args() @@ -69,7 +94,9 @@ repo = get_repo() correction_dict = get_correction_dict( args.correction_file -) | get_corrections_from_citation_cff(LOCAL_DIR / REPO_DIR_NAME / "CITATION.cff") +) | get_corrections_from_citation_cff( + LOCAL_DIR / REPO_DIR_NAME / 'CITATION.cff' +) def add_to_users(users_dkt, new_user): @@ -93,37 +120,37 @@ def add_to_users(users_dkt, new_user): users = {} highlights = { - "Highlights": {}, - "New Features": {}, - "Improvements": {}, - "Performance": {}, - "Bug Fixes": {}, - "API Changes": {}, - "Deprecations": {}, - "Build Tools": {}, - "Documentation": {}, + 'Highlights': {}, + 'New Features': {}, + 'Improvements': {}, + 'Performance': {}, + 'Bug Fixes': {}, + 'API Changes': {}, + 'Deprecations': {}, + 'Build Tools': {}, + 'Documentation': {}, } other_pull_requests = {} label_to_section = { - "bug": "Bug Fixes", - "bugfix": "Bug Fixes", - "feature": "New Features", - "api": "API Changes", - "highlight": "Highlights", - "performance": "Performance", - "enhancement": "Improvements", - "deprecation": "Deprecations", - "dependencies": "Build Tools", - "documentation": "Documentation", + 'bug': 'Bug Fixes', + 'bugfix': 'Bug Fixes', + 'feature': 'New Features', + 'api': 'API Changes', + 'highlight': 'Highlights', + 'performance': 'Performance', + 'enhancement': 'Improvements', + 'deprecation': 'Deprecations', + 'dependencies': 'Build Tools', + 'documentation': 'Documentation', } -def parse_pull(pull): - assert pull.merged or pull.number in args.with_pr +def parse_pull(pull: PullRequest, repo_: Repository = repo): + # assert pull.merged or pull.number in args.with_pr - commit = repo.get_commit(pull.merge_commit_sha) + commit = repo_.get_commit(pull.merge_commit_sha) if commit.committer is not None: add_to_users(users, commit.committer) @@ -142,23 +169,35 @@ def parse_pull(pull): pr_labels = {label.name.lower() for label in pull.labels} for label_name, 
section in label_to_section.items():
         if label_name in pr_labels:
-            highlights[section][pull.number] = {"summary": summary, "repo": GH_REPO}
+            highlights[section][pull.number] = {
+                'summary': summary,
+                'repo': repo_.full_name.split('/')[1],
+            }
             assigned_to_section = True

     if not assigned_to_section:
-        other_pull_requests[pull.number] = {"summary": summary, "repo": GH_REPO}
+        other_pull_requests[pull.number] = {
+            'summary': summary,
+            'repo': repo_.full_name.split('/')[1],
+        }


-for pull_ in iter_pull_request(f"milestone:{args.milestone} is:merged"):
+for pull_ in iter_pull_request(f'milestone:{args.milestone} is:merged'):
     parse_pull(pull_)

 if args.with_pr is not None:
     for pr_num in args.with_pr:
-        pull = repo.get_pull(pr_num)
-        parse_pull(pull)
+        if isinstance(pr_num, int):
+            pull = repo.get_pull(pr_num)
+            r = repo
+        else:
+            r = get_repo(pr_num.user, pr_num.repo)
+            pull = r.get_pull(pr_num.pr)
+
+        parse_pull(pull, r)

 for pull in iter_pull_request(
-    f"milestone:{args.milestone} is:merged", repo=GH_DOCS_REPO
+    f'milestone:{args.milestone} is:merged', repo=GH_DOCS_REPO
 ):
     issue = pull.as_issue()
     assert pull.merged
@@ -174,17 +213,20 @@ def parse_pull(pull):
             docs_reviewers.add(review.user.login)
     assigned_to_section = False
     pr_labels = {label.name.lower() for label in pull.labels}
-    if "maintenance" in pr_labels:
-        other_pull_requests[pull.number] = {"summary": summary, "repo": GH_DOCS_REPO}
+    if 'maintenance' in pr_labels:
+        other_pull_requests[pull.number] = {
+            'summary': summary,
+            'repo': GH_DOCS_REPO,
+        }
     else:
-        highlights["Documentation"][pull.number] = {
-            "summary": summary,
-            "repo": GH_DOCS_REPO,
+        highlights['Documentation'][pull.number] = {
+            'summary': summary,
+            'repo': GH_DOCS_REPO,
         }

 # add Other PRs to the ordered dict to make doc generation easier.
-highlights["Other Pull Requests"] = other_pull_requests
+highlights['Other Pull Requests'] = other_pull_requests


 # remove these bots.
@@ -194,8 +236,8 @@ def parse_pull(pull):
 docs_authors -= BOT_LIST


-user_name_pattern = re.compile(r"@([\w-]+)")  # pattern for GitHub usernames
-pr_number_pattern = re.compile(r"#(\d+)")  # pattern for GitHub PR numbers
+user_name_pattern = re.compile(r'@([\w-]+)')  # pattern for GitHub usernames
+pr_number_pattern = re.compile(r'#(\d+)')  # pattern for GitHub PR numbers

 old_contributors = set()

@@ -203,8 +245,8 @@ def parse_pull(pull):
     file_handle = sys.stdout
 else:
     res_file_name = f"release_{args.milestone.replace('.', '_')}.md"
-    file_handle = open(args.target_directory / res_file_name, "w")
-    for file_path in args.target_directory.glob("release_*.md"):
+    file_handle = open(args.target_directory / res_file_name, 'w')
+    for file_path in args.target_directory.glob('release_*.md'):
         if file_path.name == res_file_name:
             continue
         with open(file_path) as f:
@@ -212,32 +254,39 @@ def parse_pull(pull):

 # Now generate the release notes

-title = f"# napari {args.milestone}"
+title = f'# napari {args.milestone}'
 print(title, file=file_handle)

-print(
-    f"""
+notes_dir = LOCAL_DIR / 'additional_notes' / args.milestone
+if not any(notes_dir.glob('*.md')):
+    print(
+        'There are no prepared sections in the additional_notes directory.',
+        file=sys.stderr,
+    )
+
+if (fn := notes_dir / 'header.md').exists():
+    intro = fn.open().read()
+else:
+    intro = f"""
 We're happy to announce the release of napari {args.milestone}!
 napari is a fast, interactive, multi-dimensional image viewer for Python.
 It's designed for browsing, annotating, and analyzing large multi-dimensional
 images. It's built on top of Qt (for the GUI), vispy (for performant GPU-based
 rendering), and the scientific Python stack (numpy, scipy).
-""",
-    file=file_handle,
-)

-print(
-    """
-For more information, examples, and documentation, please visit our website:
-https://napari.org/stable/
-""",
-    file=file_handle,
-)
+For more information, examples, and documentation, please visit our website,
+https://napari.org.
+"""
+
+print(intro, file=file_handle)

 for section, pull_request_dicts in highlights.items():
-    print(f"## {section}\n", file=file_handle)
+    print(f'## {section}\n', file=file_handle)
     section_path = (
-        LOCAL_DIR / "additional_notes" / args.milestone / f"{section.lower()}.md"
+        LOCAL_DIR
+        / 'additional_notes'
+        / args.milestone
+        / f'{section.lower()}.md'
     )
     mentioned_pr = set()
     if section_path.exists():
@@ -250,65 +299,57 @@ def parse_pull(pull):
     for number, pull_request_info in pull_request_dicts.items():
         if number in mentioned_pr:
             continue
-        repo_str = pull_request_info["repo"]
+        repo_str = pull_request_info['repo']
+        repo_prefix = repo_str if repo_str != 'napari' else ''
         print(
-            f'- {pull_request_info["summary"]} ([napari/{repo_str}#{number}](https://{GH}/{GH_USER}/{repo_str}/pull/{number}))',
+            f'- {pull_request_info["summary"]} ([{repo_prefix}#{number}]'
+            f"(https://{GH}/{GH_USER}/{repo_str}/pull/{number}))",
             file=file_handle,
         )
-    print("", file=file_handle)
+    print('', file=file_handle)

 contributors = {
-    "authors": authors,
-    "reviewers": reviewers,
-    "docs authors": docs_authors,
-    "docs reviewers": docs_reviewers,
+    'authors': authors | docs_authors,
+    'reviewers': reviewers | docs_reviewers,
 }
 # ignore committers
 # contributors['committers'] = committers

+new_contributors = (authors | docs_authors) - old_contributors

 for section_name, contributor_set in contributors.items():
-    if section_name.startswith("docs"):
-        repo_name = GH_DOCS_REPO
-    else:
-        repo_name = GH_REPO
-    print("", file=file_handle)
+    print('', file=file_handle)
     if None in contributor_set:
         contributor_set.remove(None)
     committer_str = (
-        f"## {len(contributor_set)} {section_name} added to this "
-        "release (alphabetical)"
+        f'## {len(contributor_set)} {section_name} added to this '
+        'release (alphabetical)'
     )
     print(committer_str, file=file_handle)
-    print("", file=file_handle)
+    print('', file=file_handle)
+    print('(+) denotes first-time contributors 🥳', file=file_handle)
     for c in sorted(contributor_set, key=lambda x: users[x].lower()):
-        commit_link = f"https://{GH}/{GH_USER}/{repo_name}/commits?author={c}"
-        print(f"- [{users[c]}]({commit_link}) - @{c}", file=file_handle)
-    print("", file=file_handle)
-
-new_contributors = (authors | docs_authors) - old_contributors
-
-
-if old_contributors and new_contributors:
-    print("## New Contributors", file=file_handle)
-    print("", file=file_handle)
-    print(
-        f"There are {len(new_contributors)} new contributors for this release:",
-        file=file_handle,
-    )
-    print("", file=file_handle)
-    for c in sorted(new_contributors, key=lambda x: users[x].lower()):
-        commit_link = f"https://{GH}/{GH_USER}/{GH_REPO}/commits?author={c}"
-        docs_commit_link = f"https://{GH}/{GH_USER}/docs/commits?author={c}"
         if c in authors and c in docs_authors:
-            print(
-                f"- {users[c]} [docs]({docs_commit_link}) "
-                f"[napari]({commit_link}) - @{c}",
-                file=file_handle,
+            first_repo_name = GH_REPO
+            second_repo_str = (
+                f' ([docs](https://{GH}/{GH_USER}/'
+                f'{GH_DOCS_REPO}/commits?author={c})) '
             )
         elif c in authors:
-            print(f"- {users[c]} [napari]({commit_link}) - @{c}", file=file_handle)
-        else:
-            print(f"- 
{users[c]} [docs]({docs_commit_link}) - @{c}", file=file_handle) + first_repo_name = GH_REPO + second_repo_str = '' + else: # docs only + first_repo_name = GH_DOCS_REPO + second_repo_str = '' + + first = ' +' if c in new_contributors else '' + commit_link = ( + f'https://{GH}/{GH_USER}/{first_repo_name}/' f'commits?author={c}' + ) + print( + f'- [{users[c]}]({commit_link}){second_repo_str} - @{c}{first}', + file=file_handle, + ) + print('', file=file_handle) diff --git a/list_opened_pr.py b/list_opened_pr.py index f9c95be..3b178c1 100644 --- a/list_opened_pr.py +++ b/list_opened_pr.py @@ -7,7 +7,7 @@ ) parser = argparse.ArgumentParser() -parser.add_argument("milestone", help="The milestone to list") +parser.add_argument('milestone', help='The milestone to list') args = parser.parse_args() @@ -16,11 +16,11 @@ pull_list = [] with short_cache(60): - iterable = iter_pull_request(f"is:pr is:open milestone:{args.milestone}") + iterable = iter_pull_request(f'is:pr is:open milestone:{args.milestone}') for pull in iterable: pull_list.append(pull) -print(f"## {len(pull_list)} opened PRs for milestone {args.milestone}") +print(f'## {len(pull_list)} opened PRs for milestone {args.milestone}') for pull in pull_list: - print(f"* [ ] #{pull.number}") + print(f'* [ ] #{pull.number}') diff --git a/pyproject.toml b/pyproject.toml index 963ad6e..e07a603 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,13 @@ [tool.ruff] +target-version = "py39" +fix = true + +line-length = 79 + +[tool.ruff.lint] # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default. select = ["E", "F", "W", "I", "UP"] -ignore = [] +ignore = ["E501"] -target-version = "py39" -fix = true +[tool.ruff.format] +quote-style = "single" diff --git a/release_utils.py b/release_utils.py index 386c820..1709562 100644 --- a/release_utils.py +++ b/release_utils.py @@ -14,7 +14,7 @@ from unidecode import unidecode from yaml import safe_load -PR_NUM_PATTERN = re.compile(r"\(#(\d+)\)(?:$|\n)") +PR_NUM_PATTERN = re.compile(r'\(#(\d+)\)(?:$|\n)') @contextmanager @@ -50,28 +50,22 @@ def setup_cache(timeout=3600): try: import requests_cache except ImportError: - print("requests_cache not installed", file=sys.stderr) + print('requests_cache not installed', file=sys.stderr) return """setup cache for requests""" - requests_cache.install_cache("github_cache", backend="sqlite", expire_after=timeout) + requests_cache.install_cache( + 'github_cache', backend='sqlite', expire_after=timeout + ) LOCAL_DIR = Path(__file__).parent -REPO_DIR_NAME = "project_repo" -GH = os.environ.get("GH", "github.com") -GH_USER = os.environ.get("GH_USER", "napari") -GH_REPO = os.environ.get("GH_REPO", "napari") -GH_DOCS_REPO = os.environ.get("GH_REPO", "docs") -GH_TOKEN = os.environ.get("GH_TOKEN") -if GH_TOKEN is None: - raise RuntimeError( - "It is necessary that the environment variable `GH_TOKEN` " - "be set to avoid running into problems with rate limiting. " - "One can be acquired at https://github.com/settings/tokens.\n\n" - "You do not need to select any permission boxes while generating " - "the token." 
-    )
+REPO_DIR_NAME = 'project_repo'
+GH = os.environ.get('GH', 'github.com')
+GH_USER = os.environ.get('GH_USER', 'napari')
+GH_REPO = os.environ.get('GH_REPO', 'napari')
+GH_DOCS_REPO = os.environ.get('GH_DOCS_REPO', 'docs')
+GH_TOKEN = os.environ.get('GH_TOKEN')

 _G = None

@@ -79,13 +73,21 @@ def setup_cache(timeout=3600):
 def get_github():
     global _G
     if _G is None:
+        if GH_TOKEN is None:
+            raise RuntimeError(
+                'It is necessary that the environment variable `GH_TOKEN` '
+                'be set to avoid running into problems with rate limiting. '
+                'One can be acquired at https://github.com/settings/tokens.\n\n'
+                'You do not need to select any permission boxes while generating '
+                'the token.'
+            )
         _G = Github(GH_TOKEN)
     return _G


 def get_repo(user=GH_USER, repo=GH_REPO):
     g = get_github()
-    return g.get_repo(f"{user}/{repo}")
+    return g.get_repo(f'{user}/{repo}')


 def get_local_repo(path=None):
@@ -107,12 +109,12 @@ def get_common_ancestor(commit1, commit2):
     return local_repo.merge_base(commit1, commit2)[0]


-def get_commits_to_ancestor(ancestor, rev="main"):
+def get_commits_to_ancestor(ancestor, rev='main'):
     local_repo = get_local_repo()
-    yield from local_repo.iter_commits(f"{ancestor.hexsha}..{rev}")
+    yield from local_repo.iter_commits(f'{ancestor.hexsha}..{rev}')


-def get_commit_counts_from_ancestor(release, rev="main"):
+def get_commit_counts_from_ancestor(release, rev='main'):
     """
     get number of commits from ancestor to release
     """
@@ -125,43 +127,46 @@ def get_commit_counts_from_ancestor(release, rev="main"):

 def get_milestone(
     milestone_name: str | None,
+    repo: str = f'{GH_USER}/{GH_REPO}',
 ) -> Milestone.Milestone | None:
     if milestone_name is None:
         return None
-    repository = get_repo()
+    repository = get_repo(*repo.split('/'))
     with contextlib.suppress(ValueError):
         return repository.get_milestone(int(milestone_name))
     for milestone in repository.get_milestones():
         if milestone.title == milestone_name:
             return milestone
-    raise RuntimeError(f"Milestone {milestone_name} not found")
+    raise RuntimeError(f'Milestone {milestone_name} not found')


-def get_split_date(previous_release, rev="main"):
+def get_split_date(previous_release, rev='main'):
     common_ancestor = get_common_ancestor(previous_release, rev)
     remote_commit = get_repo().get_commit(common_ancestor.hexsha)
-    return datetime.strptime(remote_commit.last_modified, "%a, %d %b %Y %H:%M:%S %Z")
+    return datetime.strptime(
+        remote_commit.last_modified, '%a, %d %b %Y %H:%M:%S %Z'
+    )


 def iter_pull_request(additional_query, user=GH_USER, repo=GH_REPO):
     iterable = get_github().search_issues(
-        f"repo:{user}/{repo} is:pr sort:created-asc {additional_query}"
+        f'repo:{user}/{repo} is:pr sort:created-asc {additional_query}'
     )
     print(
-        f"Found {iterable.totalCount} pull requests on query: {additional_query}"
-        f" for repo {user}/{repo}",
+        f'Found {iterable.totalCount} pull requests on query: {additional_query}'
+        f' for repo {user}/{repo}',
         file=sys.stderr,
     )
     for pull_issue in tqdm(
         iterable,
-        desc="Pull Requests...",
+        desc=f'Pull Requests ({user}/{repo})...',
         total=iterable.totalCount,
     ):
         yield pull_issue.as_pull_request()


-def get_pr_commits_dict(repo: Repo, branch: str = "main") -> dict[int, str]:
+def get_pr_commits_dict(repo: Repo, branch: str = 'main') -> dict[int, str]:
     """
     Calculate a mapping from PR number to commit hash for a provided branch

     Parameters
@@ -206,15 +211,15 @@ def get_consumed_pr(repo: Repo, target_branch: str) -> set[int]:
 def existing_file(path: str) -> Path:
     path = Path(path)
     if not path.exists():
-        raise FileNotFoundError(f"{path}
not found") + raise FileNotFoundError(f'{path} not found') return path BOT_LIST = { - "github-actions[bot]", - "pre-commit-ci[bot]", - "dependabot[bot]", - "napari-bot", + 'github-actions[bot]', + 'pre-commit-ci[bot]', + 'dependabot[bot]', + 'napari-bot', None, } @@ -229,39 +234,45 @@ def get_correction_dict(file_path: Path | None) -> dict[str, str]: correction_dict = {} with open(file_path) as f: corrections = safe_load(f) - for correction in corrections["login_to_name"]: - correction_dict[correction["login"]] = unidecode( - correction["corrected_name"].lower() + for correction in corrections['login_to_name']: + correction_dict[correction['login']] = unidecode( + correction['corrected_name'].lower() ) return correction_dict -def get_corrections_from_citation_cff(cff_data: str | Path | dict) -> dict[str, str]: +def get_corrections_from_citation_cff( + cff_data: str | Path | dict, +) -> dict[str, str]: if isinstance(cff_data, (str, Path)): cff_data = Path(cff_data) if not cff_data.exists(): return {} - with cff_data.open(encoding="utf8") as f: + with cff_data.open(encoding='utf8') as f: cff_data = safe_load(f) res = {} - for author in cff_data["authors"]: - if "alias" in author: - res[author["alias"]] = f'{author["given-names"]} {author["family-names"]}' + for author in cff_data['authors']: + if 'alias' in author: + res[author['alias']] = ( + f'{author["given-names"]} {author["family-names"]}' + ) return res -def get_corrections_from_citation_cff2(cff_data: str | Path | dict) -> dict[str, str]: +def get_corrections_from_citation_cff2( + cff_data: str | Path | dict, +) -> dict[str, str]: if isinstance(cff_data, (str, Path)): cff_data = Path(cff_data) if not cff_data.exists(): return {} - with cff_data.open(encoding="utf8") as f: + with cff_data.open(encoding='utf8') as f: cff_data = safe_load(f) res = {} - for author in cff_data["authors"]: - if "alias" in author: - res[author["alias"]] = unidecode( + for author in cff_data['authors']: + if 'alias' in author: + res[author['alias']] = unidecode( f'{author["given-names"]} {author["family-names"]}'.lower() ) return res diff --git a/sort_citation_cff.py b/sort_citation_cff.py index 5ec65c3..3459f72 100644 --- a/sort_citation_cff.py +++ b/sort_citation_cff.py @@ -1,35 +1,38 @@ """ Sort CITATION.cff files by author family-name. """ + import argparse from pathlib import Path from yaml import safe_dump, safe_load parser = argparse.ArgumentParser() -parser.add_argument("path", help="The path to the citation file to sort", type=Path) +parser.add_argument( + 'path', help='The path to the citation file to sort', type=Path +) args = parser.parse_args() -with args.path.open(encoding="utf8") as f: +with args.path.open(encoding='utf8') as f: data = safe_load(f) def reorder_author_fields(author): res = {} - for key in ["given-names", "family-names", "affiliation", "orcid"]: + for key in ['given-names', 'family-names', 'affiliation', 'orcid']: if key in author: res[key] = author[key] return res -data["authors"] = [ +data['authors'] = [ reorder_author_fields(x) - for x in sorted(data["authors"], key=lambda x: x["family-names"]) + for x in sorted(data['authors'], key=lambda x: x['family-names']) ] -with args.path.open("w", encoding="utf8") as f: +with args.path.open('w', encoding='utf8') as f: safe_dump(data, f, sort_keys=False, allow_unicode=True)