Commit

build: update actions to perform linting and minimal dry-run testing (#1)

updated github workflows and linting/formatting of workflow
akhanf authored Feb 14, 2024
1 parent 9520207 commit 8b539e2
Showing 11 changed files with 296 additions and 247 deletions.
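
For reference, the checks this commit moves into CI can be reproduced locally with the same commands the new workflow runs (a sketch; assumes snakemake and snakefmt are installed, e.g. via mamba install -c conda-forge -c bioconda snakemake snakefmt, and that the repository root is the working directory):

    # static lint of the Snakemake workflow
    snakemake --lint

    # format Snakemake files under workflow/
    snakefmt workflow

    # minimal integration test: dry-run that builds the DAG without executing jobs
    snakemake -np
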
6 changes: 0 additions & 6 deletions .github/snakebids_action_requirements.txt

This file was deleted.

72 changes: 31 additions & 41 deletions .github/workflows/main.yml
@@ -6,52 +6,42 @@ on:
   pull_request:
     branches: [ main ]
 
 
 jobs:
-  Formatting:
+  tests:
     runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v2
-    - name: Formatting
-      uses: github/super-linter@v4
-      env:
-        VALIDATE_ALL_CODEBASE: false
-        DEFAULT_BRANCH: main
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        VALIDATE_SNAKEMAKE_SNAKEFMT: true
-
-  Linting:
-    runs-on: ubuntu-latest
+    strategy:
+      max-parallel: 5
     steps:
     - uses: actions/checkout@v2
-    - name: Lint workflow
-      uses: snakemake/snakemake-github-action@v1.24.0
+    - name: Set up Python 3.11
+      uses: actions/setup-python@v2
       with:
-        directory: .
-        stagein: "pip install --no-deps -r .github/snakemake_action_requirements.txt"
-        snakefile: workflow/Snakefile
-        args: "--lint"
+        python-version: 3.11
+    - name: Add conda to system path
+      run: |
+        # $CONDA is an environment variable pointing to the root of the miniconda directory
+        echo $CONDA/bin >> $GITHUB_PATH
+    - name: Install dependencies
+      run: |
+        conda install -c conda-forge mamba
+    # - name: Lint with flake8
+    #   run: |
+    #     mamba install flake8
+    #     # stop the build if there are Python syntax errors or undefined names
+    #     flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+    #     # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+    #     flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    - name: Setup snakemake and workflow dependencies
+      run: |
+        mamba install -c conda-forge -c bioconda pytest snakemake singularity snakefmt
+        pip install .
+    - name: Lint with snakemake and snakefmt
+      run: |
+        snakemake --lint
+        snakefmt workflow
+    - name: Integration dry-run test
+      run: |
+        snakemake -np
-
-  Testing:
-    runs-on: ubuntu-latest
-    needs:
-      - Linting
-      - Formatting
-    steps:
-    - uses: actions/checkout@v2
-
-    - name: Test workflow
-      uses: snakemake/snakemake-github-action@v1.24.0
-      with:
-        directory: .test
-        stagein: "pip install --no-deps -r .github/snakemake_action_requirements.txt"
-        snakefile: workflow/Snakefile
-        args: "--sdm apptainer --show-failed-logs --cores 1 --all-temp"
-    - name: Test report
-      uses: snakemake/snakemake-github-action@v1.24.0
-      with:
-        directory: .test
-        stagein: "pip install --no-deps -r .github/snakemake_action_requirements.txt"
-        snakefile: workflow/Snakefile
-        args: "--report report.zip"
Empty file added spimprep/__init__.py
Empty file.
90 changes: 4 additions & 86 deletions workflow/Snakefile
@@ -1,6 +1,6 @@
 import json
-from snakebids import bids, set_bids_spec
 from pathlib import Path
+from snakebids import bids, set_bids_spec
 import pandas as pd
 import os

@@ -9,13 +9,13 @@ configfile: "config/config.yml"
 
 
 container: config["containers"]["spimprep"]
-conda: config["conda_envs"]["global"]
 
 
 # use expandvars so we can use e.g. '$SLURM_TMPDIR'
 root = os.path.expandvars(config["root"])
 work = os.path.expandvars(config["work"])
 
-#this is needed to use the latest bids spec with the pre-release snakebids
+# this is needed to use the latest bids spec with the pre-release snakebids
 set_bids_spec("v0_10_1")
 
 # read datasets tsv
@@ -33,89 +33,7 @@ datasets = pd.read_csv(
 )
 
 
-def get_all_targets():
-    targets = []
-    for i in range(len(datasets)):
-        targets.extend(
-            expand(
-                bids(
-                    root=root,
-                    subject="{subject}",
-                    datatype="micr",
-                    sample="{sample}",
-                    acq="{acq}",
-                    desc="{desc}",
-                    stain="{stain}",
-                    suffix="spim.ome.zarr.zip",
-                ),
-                subject=datasets.loc[i, "subject"],
-                sample=datasets.loc[i, "sample"],
-                acq=datasets.loc[i, "acq"],
-                desc=config["targets"]["desc"],
-                stain=[datasets.loc[i, "stain_0"], datasets.loc[i, "stain_1"]],
-            )
-        )
-        targets.extend(
-            expand(
-                bids(
-                    root=root,
-                    subject="{subject}",
-                    datatype="micr",
-                    sample="{sample}",
-                    acq="{acq}",
-                    desc="{desc}",
-                    from_="{template}",
-                    suffix="dseg.ome.zarr.zip",
-                ),
-                subject=datasets.loc[i, "subject"],
-                sample=datasets.loc[i, "sample"],
-                acq=datasets.loc[i, "acq"],
-                desc=config["targets"]["desc"],
-                template=config["templates"],
-                stain=[datasets.loc[i, "stain_0"], datasets.loc[i, "stain_1"]],
-            )
-        )
-        targets.extend(
-            expand(
-                bids(
-                    root=root,
-                    subject="{subject}",
-                    datatype="micr",
-                    sample="{sample}",
-                    acq="{acq}",
-                    desc="{desc}",
-                    stain="{stain}",
-                    level="{level}",
-                    suffix="spim.nii",
-                ),
-                subject=datasets.loc[i, "subject"],
-                sample=datasets.loc[i, "sample"],
-                acq=datasets.loc[i, "acq"],
-                desc=config["targets"]["desc"],
-                level=config["nifti"]["levels"],
-                stain=[datasets.loc[i, "stain_0"], datasets.loc[i, "stain_1"]],
-            )
-        )
-
-    return targets
-
-
-def get_dataset_path(wildcards):
-    df = datasets.query(
-        f"subject=='{wildcards.subject}' and sample=='{wildcards.sample}' and acq=='{wildcards.acq}'"
-    )
-    return df.dataset_path.to_list()[0]
-
-
-def get_stains(wildcards):
-    df = datasets.query(
-        f"subject=='{wildcards.subject}' and sample=='{wildcards.sample}' and acq=='{wildcards.acq}'"
-    )
-
-    return [
-        df.stain_0.to_list()[0],
-        df.stain_1.to_list()[0],
-    ]
+include: "rules/common.smk"
 
 
 rule all:
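
For background, Snakemake's include: directive splices another file into the workflow at parse time, so the helpers deleted above remain available to rules as long as they are now defined in rules/common.smk (that file's contents are not shown in this diff). A minimal sketch of the mechanism, with hypothetical common.smk content:

    # workflow/Snakefile
    include: "rules/common.smk"  # parsed in place, as if pasted here


    rule all:
        input:
            get_all_targets(),  # hypothetical: resolved from common.smk


    # workflow/rules/common.smk (hypothetical -- actual contents not in this diff)
    def get_all_targets():
        ...
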
6 changes: 0 additions & 6 deletions workflow/envs/global.yml

This file was deleted.

31 changes: 19 additions & 12 deletions workflow/rules/atlasreg.smk
@@ -1,15 +1,16 @@
 def bids_tpl(root, template, **entities):
     """bids() wrapper for files in tpl-template folder"""
     return str(Path(bids(root=root, tpl=template)) / bids(tpl=template, **entities))
 
 
 rule import_anat:
     input:
         anat=lambda wildcards: config["atlases"][wildcards.template]["anat"],
     output:
         anat=bids_tpl(root=root, template="{template}", suffix="anat.nii.gz"),
     log:
-        bids_tpl(root='logs',datatype="import_anat",template="{template}", suffix="log.txt")
+        bids_tpl(
+            root="logs",
+            datatype="import_anat",
+            template="{template}",
+            suffix="log.txt",
+        ),
     shell:
         "cp {input} {output}"

@@ -20,7 +21,12 @@ rule import_dseg:
     output:
         dseg=bids_tpl(root=root, template="{template}", suffix="dseg.nii.gz"),
     log:
-        bids_tpl(root='logs',datatype="import_dseg",template="{template}", suffix="log.txt")
+        bids_tpl(
+            root="logs",
+            datatype="import_dseg",
+            template="{template}",
+            suffix="log.txt",
+        ),
     shell:
         "cp {input} {output}"

@@ -31,7 +37,9 @@ rule import_lut:
     output:
         tsv=bids_tpl(root=root, template="{template}", suffix="dseg.tsv"),
     log:
-        bids_tpl(root='logs',datatype="import_lut",template="{template}", suffix="log.txt")
+        bids_tpl(
+            root="logs", datatype="import_lut", template="{template}", suffix="log.txt"
+        ),
     script:
         "../scripts/import_labelmapper_lut.py"

@@ -75,7 +83,7 @@ rule affine_reg:
         ),
     log:
         bids(
-            root='logs',
+            root="logs",
             subject="{subject}",
             datatype="affine_reg",
             sample="{sample}",
@@ -129,7 +137,7 @@ rule deform_reg:
         ),
     log:
         bids(
-            root='logs',
+            root="logs",
             subject="{subject}",
             datatype="deform_reg",
             sample="{sample}",
@@ -178,7 +186,7 @@ rule resample_labels_to_zarr:
         ),
     log:
         bids(
-            root='logs',
+            root="logs",
             subject="{subject}",
             datatype="resample_labels_to_zarr",
             sample="{sample}",
@@ -187,7 +195,6 @@ rule resample_labels_to_zarr:
             space="{template}",
             suffix="log.txt",
         ),
-
     script:
         "../scripts/resample_labels_to_zarr.py"

@@ -234,7 +241,7 @@ rule zarr_to_ome_zarr_labels:
         "preproc"
     log:
         bids(
-            root='logs',
+            root="logs",
             subject="{subject}",
             datatype="zarr_to_ome_zarr_labels",
             sample="{sample}",
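
For context on the bids_tpl() helper at the top of this file: it joins a tpl-{template} directory with a matching filename. A minimal usage sketch, assuming snakebids renders the custom tpl entity as a "tpl-<value>" prefix ("ABAv3" is a hypothetical template name used only for illustration):

    from pathlib import Path
    from snakebids import bids

    def bids_tpl(root, template, **entities):
        """bids() wrapper for files in tpl-template folder"""
        return str(Path(bids(root=root, tpl=template)) / bids(tpl=template, **entities))

    # expected form of the result:
    # bids_tpl(root="results", template="ABAv3", suffix="anat.nii.gz")
    #   -> "results/tpl-ABAv3/tpl-ABAv3_anat.nii.gz"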