Commit: summary messages
fishingguy456 committed Jun 22, 2022
1 parent abc0428 commit 6729aa5
Showing 2 changed files with 30 additions and 14 deletions.
examples/autotest.py: 15 changes (10 additions & 5 deletions)
@@ -15,6 +15,7 @@
 from imgtools.ops import StructureSetToSegmentation, ImageAutoInput, ImageAutoOutput, Resample
 from imgtools.pipeline import Pipeline
 from imgtools.utils.nnunetutils import generate_dataset_json
+from imgtools.utils.args import parser
 from joblib import Parallel, delayed
 from imgtools.modules import Segmentation
 from torch import sparse_coo_tensor
@@ -105,11 +106,14 @@ def __init__(self,
         self.label_names = {}
         self.ignore_missing_regex = ignore_missing_regex

-        with open(pathlib.Path(self.input_directory, "roi_names.yaml").as_posix(), "r") as f:
-            try:
-                self.label_names = yaml.safe_load(f)
-            except yaml.YAMLError as exc:
-                print(exc)
+        roi_path = pathlib.Path(self.input_directory, "roi_names.yaml").as_posix()
+
+        if os.path.exists(roi_path):
+            with open(roi_path, "r") as f:
+                try:
+                    self.label_names = yaml.safe_load(f)
+                except yaml.YAMLError as exc:
+                    print(exc)

         if not isinstance(self.label_names, dict):
             raise ValueError("roi_names.yaml must parse as a dictionary")
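The guard above makes roi_names.yaml optional, while the isinstance check still rejects anything that does not parse to a mapping. A minimal sketch of the expected loading behavior, assuming the file maps label names to ROI-name patterns (the label names and patterns below are made-up examples, not taken from the repo):

import yaml

# Hypothetical roi_names.yaml contents:
#   GTV: GTV*
#   Brainstem: Brain?stem
text = "GTV: GTV*\nBrainstem: Brain?stem\n"
label_names = yaml.safe_load(text)
assert isinstance(label_names, dict)  # {'GTV': 'GTV*', 'Brainstem': 'Brain?stem'}

# Caveat: an empty file parses to None rather than {}, so it would
# still fail the isinstance check that follows the guarded load.
assert yaml.safe_load("") is None

Initializing self.label_names to {} before the guarded load is what keeps the missing-file case valid.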
@@ -189,6 +193,7 @@ def process_one_subject(self, subject_id):
         subject_id : str
             The ID of subject to process
         """
+        # process when overwriting is enabled, or when it is disabled and the output does not exist yet
         if self.overwrite or (not self.overwrite and not (os.path.exists(pathlib.Path(self.output_directory, subject_id).as_posix()) or self.glob_checker_nnunet(subject_id))):
             #Check if the subject_id has already been processed
             if os.path.exists(pathlib.Path(self.output_directory,".temp",f'temp_{subject_id}.pkl').as_posix()):
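As an aside, the overwrite condition here can be simplified: the second operand of or is only evaluated when self.overwrite is false, so the not self.overwrite factor is redundant. A quick truth-table check, with output_exists standing in for the two existence tests:

# overwrite or (not overwrite and not output_exists)
# is equivalent to
# overwrite or not output_exists
for overwrite in (True, False):
    for output_exists in (True, False):
        original = overwrite or (not overwrite and not output_exists)
        simplified = overwrite or not output_exists
        assert original == simplified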
imgtools/autopipeline.py: 29 changes (20 additions & 9 deletions)
@@ -106,11 +106,14 @@ def __init__(self,
         self.label_names = {}
         self.ignore_missing_regex = ignore_missing_regex

-        with open(pathlib.Path(self.input_directory, "roi_names.yaml").as_posix(), "r") as f:
-            try:
-                self.label_names = yaml.safe_load(f)
-            except yaml.YAMLError as exc:
-                print(exc)
+        roi_path = pathlib.Path(self.input_directory, "roi_names.yaml").as_posix()
+
+        if os.path.exists(roi_path):
+            with open(roi_path, "r") as f:
+                try:
+                    self.label_names = yaml.safe_load(f)
+                except yaml.YAMLError as exc:
+                    print(exc)

         if not isinstance(self.label_names, dict):
             raise ValueError("roi_names.yaml must parse as a dictionary")
@@ -190,6 +193,7 @@ def process_one_subject(self, subject_id):
         subject_id : str
             The ID of subject to process
         """
+        # process when overwriting is enabled, or when it is disabled and the output does not exist yet
        if self.overwrite or (not self.overwrite and not (os.path.exists(pathlib.Path(self.output_directory, subject_id).as_posix()) or self.glob_checker_nnunet(subject_id))):
             #Check if the subject_id has already been processed
             if os.path.exists(pathlib.Path(self.output_directory,".temp",f'temp_{subject_id}.pkl').as_posix()):
@@ -411,10 +415,10 @@ def run(self):
             print("Dataset already processed...")
             shutil.rmtree(pathlib.Path(self.output_directory, ".temp").as_posix())
         else:
-            # Parallel(n_jobs=self.n_jobs, verbose=verbose)(
-            #     delayed(self._process_wrapper)(subject_id) for subject_id in subject_ids)
-            for subject_id in subject_ids:
-                self._process_wrapper(subject_id)
+            Parallel(n_jobs=self.n_jobs, verbose=verbose)(
+                delayed(self._process_wrapper)(subject_id) for subject_id in subject_ids)
+            # for subject_id in subject_ids:
+            #     self._process_wrapper(subject_id)
             self.save_data()
             all_patient_names = glob.glob(pathlib.Path(self.input_directory, "*"," ").as_posix()[0:-1])
             all_patient_names = [os.path.split(os.path.split(x)[0])[1] for x in all_patient_names]
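This hunk re-enables the joblib-based parallelism that had been commented out in favor of a sequential loop. A self-contained sketch of the Parallel/delayed pattern used here (the worker below is a stand-in for _process_wrapper):

from joblib import Parallel, delayed

def process_one(subject_id):
    # stand-in for AutoPipeline._process_wrapper
    return subject_id.upper()

subject_ids = ["hn-001", "hn-002", "hn-003"]

# One task per subject, fanned out over n_jobs worker processes;
# results are returned in submission order.
results = Parallel(n_jobs=2, verbose=0)(
    delayed(process_one)(sid) for sid in subject_ids
)
print(results)  # ['HN-001', 'HN-002', 'HN-003']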
@@ -452,6 +456,13 @@ def main():
         * dataset.json can be found at /path/to/dataset/json
         * You can train nnU-Net by cloning /path/to/nnunet/repo and run `nnUNet_plan_and_preprocess -t taskID` to let the nnU-Net package prepare
     """
+    print(f"Outputted data to {args.output_directory}")
+    csv_path = pathlib.Path(args.output_directory, "dataset.csv").as_posix()
+    print(f"Dataset info found at {csv_path}")
+    if args.nnunet:
+        json_path = pathlib.Path(args.output_directory, "dataset.json").as_posix()
+        print(f"dataset.json for nnU-Net can be found at {json_path}")
+        print("You can train nnU-Net by cloning https://github.com/MIC-DKFZ/nnUNet/ and running `nnUNet_plan_and_preprocess -t taskID` to let the nnU-Net package prepare")

 if __name__ == "__main__":
     main()
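The new summary messages build their paths with pathlib.Path(...).as_posix(), which yields forward-slash paths on every platform. A small sketch of the resulting console output, using a hypothetical output directory in place of args.output_directory:

import pathlib

output_directory = "/data/processed"  # hypothetical args.output_directory
csv_path = pathlib.Path(output_directory, "dataset.csv").as_posix()
print(f"Outputted data to {output_directory}")
print(f"Dataset info found at {csv_path}")
# Outputted data to /data/processed
# Dataset info found at /data/processed/dataset.csv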
