Commit 63604ae: bug fixes_1.0
Former-commit-id: d255674
Vishwesh4 committed Dec 9, 2021
1 parent ea70778 commit 63604ae
Showing 4 changed files with 43 additions and 35 deletions.
19 changes: 13 additions & 6 deletions imgtools/autopipeline.py
@@ -11,6 +11,9 @@
 from joblib import Parallel, delayed
 import glob
 import ast
+import datetime
+import numpy as np
+import json
 ###############################################################
 # Example usage:
 # python radcure_simple.py ./data/RADCURE/data ./RADCURE_output
@@ -61,6 +64,10 @@ def __init__(self,
         # output ops
         self.output = ImageAutoOutput(self.output_directory, self.output_streams)
 
+        #Make a directory
+        if not os.path.exists(os.path.join(self.output_directory,".temp")):
+            os.mkdir(os.path.join(self.output_directory,".temp"))
+
 
     def process_one_subject(self, subject_id):
         """Define the processing operations for one subject.
@@ -75,7 +82,7 @@ def process_one_subject(self, subject_id):
             The ID of subject to process
         """
         #Check if the subject_id has already been processed
-        if os.path.exists(os.path.join(self.output_directory,f'temp_{subject_id}.txt')):
+        if os.path.exists(os.path.join(self.output_directory,".temp",f'temp_{subject_id}.json')):
             print(f"{subject_id} already processed")
             return
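Together with the .temp setup above, these marker files give the pipeline a simple resume-on-restart behavior: each subject writes temp_<subject_id>.json when it finishes, and a rerun skips any subject whose marker already exists. A condensed sketch of the pattern (run_subject and process are illustrative names, not from the repo):

    import json
    import os

    def run_subject(output_directory, subject_id, process):
        marker = os.path.join(output_directory, ".temp", f"temp_{subject_id}.json")
        if os.path.exists(marker):        # processed in a previous run, skip
            return
        metadata = process(subject_id)    # the actual per-subject work
        with open(marker, "w") as f:      # the marker doubles as the metadata store
            json.dump(metadata, f)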

@@ -172,16 +179,16 @@ def process_one_subject(self, subject_id):
                 metadata[f"metadata_{colname}"] = [read_results[i].get_metadata()]
                 print(subject_id, " SAVED PET")
         #Saving all the metadata in multiple text files
-        with open(os.path.join(self.output_directory,f'temp_{subject_id}.txt'),'w') as f:
-            f.write(str(metadata))
+        with open(os.path.join(self.output_directory,".temp",f'temp_{subject_id}.json'),'w') as f:
+            json.dump(metadata,f)
         return
 
     def save_data(self):
-        files = glob.glob(os.path.join(self.output_directory,"*.txt"))
+        files = glob.glob(os.path.join(self.output_directory,".temp","*.json"))
         for file in files:
             subject_id = ("_").join(file.replace("/","_").replace(".","_").split("_")[-3:-1])
-            A = open(file,"r").readlines()
-            metadata = ast.literal_eval(A[0])
+            with open(file) as f:
+                metadata = json.load(f)
             self.output_df.loc[subject_id, list(metadata.keys())] = list(metadata.values())
             os.remove(file)
         self.output_df.to_csv(self.output_df_path)
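The move from f.write(str(metadata)) plus ast.literal_eval to json.dump/json.load is the substantive fix in this file: str() of a dict whose values are not Python literals (numpy scalars, datetimes) produces text that ast.literal_eval cannot parse back, so save_data would crash on otherwise good output. JSON round-trips cleanly and fails fast at write time instead. One caveat, presumably why numpy is now imported next to json: numpy types are not JSON-serializable out of the box, so values may need converting first. A sketch of the usual default= workaround (an assumption, not something shown in this diff):

    import json
    import numpy as np

    def to_jsonable(obj):
        """Fallback converter for numpy values; anything else still raises."""
        if isinstance(obj, np.generic):
            return obj.item()
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        raise TypeError(f"{type(obj)} is not JSON serializable")

    metadata = {"metadata_CT": [np.int64(120)], "spacing": np.array([1.0, 1.0, 3.0])}
    text = json.dumps(metadata, default=to_jsonable)
    assert json.loads(text) == {"metadata_CT": [120], "spacing": [1.0, 1.0, 3.0]}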
10 changes: 5 additions & 5 deletions imgtools/io/dataset.py
@@ -4,10 +4,10 @@
 from typing import List, Sequence, Optional, Callable, Iterable, Dict,Tuple
 import torchio as tio
 import pandas as pd
-from . import file_name_convention
-from ..ops import StructureSetToSegmentation, ImageAutoInput, Resample, BaseOp
-# from imgtools.io import file_name_convention
-# from imgtools.ops import StructureSetToSegmentation, ImageAutoInput, Resample, BaseOp
+# from . import file_name_convention
+# from ..ops import StructureSetToSegmentation, ImageAutoInput, Resample, BaseOp
+from imgtools.io import file_name_convention
+from imgtools.ops import StructureSetToSegmentation, ImageAutoInput, Resample, BaseOp
 from tqdm import tqdm
 from joblib import Parallel, delayed
 import SimpleITK as sitk
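The import swap, relative (from . import ...) to absolute (from imgtools.io import ...), matters when this module runs outside the package context: relative imports only resolve when Python knows the parent package. Assuming imgtools is installed, the failure mode looks like this:

    # python imgtools/io/dataset.py  (executed directly as a script)
    #
    #   from . import file_name_convention
    #   ImportError: attempted relative import with no known parent package
    #
    #   from imgtools.io import file_name_convention
    #   OK: resolves through the installed package regardless of how the file was invoked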
@@ -145,7 +145,7 @@ def process_one_subject(
                     Warning("No CT image present. Returning dose image without resampling")
                     doses = read_results[i]
                 temp[f"mod_{colname}"] = tio.ScalarImage.from_sitk(doses)
-                temp[f"metadata_{colname}"] = str(read_results[i].get_metadata())
+                temp[f"metadata_{colname}"] = read_results[i].get_metadata()
             elif modality == "RTSTRUCT":
                 #For RTSTRUCT, you need image or PT
                 structure_set = read_results[i]
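Dropping the str() around get_metadata() keeps the metadata a dict all the way to the JSON writer in autopipeline.py; stringifying early would re-create exactly the literal-parsing problem removed there. Roughly:

    meta = {"slices": 120}   # hypothetical metadata dict
    str(meta)                # "{'slices': 120}": one opaque string; json.dump escapes it whole
    meta                     # {'slices': 120}: stays structured; json.dump writes real fields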
4 changes: 2 additions & 2 deletions imgtools/modules/dose.py
@@ -127,8 +127,8 @@ def get_metadata(self):
                 self.dvh[ROI_reference]["total_vol"] = tot_vol
         except:
             # TO-DO: more nuanced error catch instead of returning None
-            warnings.warn("No DVH information present in the DICOM. Returning None")
-            self.dvh = None
+            warnings.warn("No DVH information present in the DICOM. Returning empty dictionary")
+            self.dvh = {}
 
         return self.dvh
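Returning {} instead of None is a small null-object fix: callers that merge or iterate the DVH metadata no longer need a None guard. A sketch with an illustrative caller (not code from this repo):

    def merge_metadata(dvh):
        combined = {"modality": "RTDOSE"}
        combined.update(dvh)    # fine for {}; raises TypeError for None
        return combined

    merge_metadata({})          # {'modality': 'RTDOSE'}
    # merge_metadata(None)      # TypeError: 'NoneType' object is not iterable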

45 changes: 23 additions & 22 deletions tests/test_autopipe.py
@@ -14,27 +14,28 @@
 
 @pytest.fixture
 def dataset_path():
-    curr_path=("/").join(os.getcwd().split("/")[:-1])
-    input_path = curr_path+ "/examples/data_test"
-    output_path = curr_path+ "/tests/"
+    curr_path=os.getcwd().split("/")
+    curr_path = ("/").join(curr_path[0:curr_path.index("imgtools")+1])
+    input_path = os.path.join(curr_path,"examples","data_test")
+    output_path = os.path.join(curr_path, "tests")
     return input_path,output_path
 
 @pytest.mark.parametrize("modalities",["PT","CT,RTDOSE","CT,PT,RTDOSE","CT,RTSTRUCT,RTDOSE","CT,RTSTRUCT,RTDOSE,PT"])
 # @pytest.mark.parametrize("modalities",["CT,PT,RTDOSE"])
 def test_pipeline(dataset_path,modalities):
     input_path,output_path = dataset_path
     n_jobs = 2
-    output_path_mod = output_path + "temp_folder_" + ("_").join(modalities.split(","))
+    output_path_mod = os.path.join(output_path,"temp_folder_" + ("_").join(modalities.split(",")))
     #Initialize pipeline for the current setting
     pipeline = AutoPipeline(input_path,output_path_mod,modalities,n_jobs=n_jobs)
     #Run for different modalities
     comp_path = os.path.join(output_path_mod, "dataset.csv")
     pipeline.run()
 
     #Check if the crawl and edges exist
-    crawl_path = ("/").join(input_path.split("/")[:-1]) + "/imgtools_" + input_path.split("/")[-1] + ".csv"
-    json_path = ("/").join(input_path.split("/")[:-1]) + "/imgtools_" + input_path.split("/")[-1] + ".json"
-    edge_path = ("/").join(input_path.split("/")[:-1]) + "/imgtools_" + input_path.split("/")[-1] + "_edges.csv"
+    crawl_path = os.path.join(("/").join(input_path.split("/")[:-1]),"imgtools_" + input_path.split("/")[-1] + ".csv")
+    json_path = os.path.join(("/").join(input_path.split("/")[:-1]),"imgtools_" + input_path.split("/")[-1] + ".json")
+    edge_path = os.path.join(("/").join(input_path.split("/")[:-1]),"imgtools_" + input_path.split("/")[-1] + "_edges.csv")
     assert os.path.exists(crawl_path) & os.path.exists(edge_path), "this breaks because there was no crawler output"
 
     #for the test example, there are 6 files and 4 connections
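The fixture now builds paths with os.path.join instead of manual "/" concatenation, and it locates the repository root by searching the cwd components for a directory named imgtools rather than assuming the tests always run exactly one level below the root. One assumption remains: os.getcwd().split("/") presumes POSIX separators, so the fixture is still not Windows-safe. A fully portable sketch using pathlib (an alternative, not what the commit does; it finds the nearest rather than the topmost imgtools ancestor):

    from pathlib import Path

    def repo_root(marker: str = "imgtools") -> Path:
        """Walk up from the cwd to the first ancestor directory named `marker`."""
        for candidate in [Path.cwd()] + list(Path.cwd().parents):
            if candidate.name == marker:
                return candidate
        raise FileNotFoundError(f"no ancestor directory named {marker!r}")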
@@ -48,38 +49,38 @@ def test_pipeline(dataset_path,modalities):
 
     #Check the nrrd files
     if modalities=="PT":
-        path_pet = output_path_mod + "/pet/" + os.listdir(output_path_mod+"/pet")[0]
+        path_pet = os.path.join(output_path_mod,"pet",os.listdir(os.path.join(output_path_mod,"pet"))[0])
         dicom,_ = nrrd.read(path_pet)
         assert dicom.shape[-1] == int(crawl_data.loc[crawl_data["modality"]=="PT","instances"].values[0])
     elif modalities=="CT,RTDOSE":
-        path_ct = output_path_mod + "/image/" + os.listdir(output_path_mod+"/image")[0]
-        path_dose = output_path_mod + "/dose/" + os.listdir(output_path_mod+"/dose")[0]
+        path_ct = os.path.join(output_path_mod,"image",os.listdir(os.path.join(output_path_mod,"image"))[0])
+        path_dose = os.path.join(output_path_mod,"dose",os.listdir(os.path.join(output_path_mod,"dose"))[0])
         dicom_ct,_ = nrrd.read(path_ct)
         dicom_dose,_ = nrrd.read(path_dose)
         assert dicom_ct.shape == dicom_dose.shape
     elif modalities=="CT,PT,RTDOSE":
-        path_ct = output_path_mod + "/image/" + os.listdir(output_path_mod+"/image")[0]
-        path_dose = output_path_mod + "/dose/" + os.listdir(output_path_mod+"/dose")[0]
-        path_pet = output_path_mod + "/pet/" + os.listdir(output_path_mod+"/pet")[0]
+        path_ct = os.path.join(output_path_mod,"image",os.listdir(os.path.join(output_path_mod,"image"))[0])
+        path_dose = os.path.join(output_path_mod,"dose",os.listdir(os.path.join(output_path_mod,"dose"))[0])
+        path_pet = os.path.join(output_path_mod,"pet",os.listdir(os.path.join(output_path_mod,"pet"))[0])
         dicom_ct,_ = nrrd.read(path_ct)
         dicom_dose,_ = nrrd.read(path_dose)
         dicom_pet,_ = nrrd.read(path_pet)
         assert dicom_ct.shape == dicom_dose.shape == dicom_pet.shape
     elif modalities=="CT,RTSTRUCT,RTDOSE":
-        path_ct = output_path_mod + "/image/" + os.listdir(output_path_mod+"/image")[0]
-        path_dose = output_path_mod + "/dose/" + os.listdir(output_path_mod+"/dose")[0]
-        path_str = output_path_mod + "/mask_ct/" + os.listdir(output_path_mod+"/mask_ct")[0]
+        path_ct = os.path.join(output_path_mod,"image",os.listdir(os.path.join(output_path_mod,"image"))[0])
+        path_dose = os.path.join(output_path_mod,"dose",os.listdir(os.path.join(output_path_mod,"dose"))[0])
+        path_str = os.path.join(output_path_mod,"mask_ct",os.listdir(os.path.join(output_path_mod,"mask_ct"))[0])
         dicom_ct,_ = nrrd.read(path_ct)
         dicom_dose,_ = nrrd.read(path_dose)
         dicom_str,_ = nrrd.read(path_str)
         #ensure they are in same physical space
         assert dicom_ct.shape == dicom_dose.shape == dicom_str.shape[1:]
     else:
-        path_ct = output_path_mod + "/image/" + os.listdir(output_path_mod+"/image")[0]
-        path_dose = output_path_mod + "/dose/" + os.listdir(output_path_mod+"/dose")[0]
-        path_ctstr = output_path_mod + "/mask_ct/" + os.listdir(output_path_mod+"/mask_ct")[0]
-        path_ptstr = output_path_mod + "/mask_pt/" + os.listdir(output_path_mod+"/mask_pt")[0]
-        path_pet = output_path_mod + "/pet/" + os.listdir(output_path_mod+"/pet")[0]
+        path_ct = os.path.join(output_path_mod,"image",os.listdir(os.path.join(output_path_mod,"image"))[0])
+        path_dose = os.path.join(output_path_mod,"dose",os.listdir(os.path.join(output_path_mod,"dose"))[0])
+        path_ctstr = os.path.join(output_path_mod,"mask_ct",os.listdir(os.path.join(output_path_mod,"mask_ct"))[0])
+        path_ptstr = os.path.join(output_path_mod,"mask_pt",os.listdir(os.path.join(output_path_mod,"mask_pt"))[0])
+        path_pet = os.path.join(output_path_mod,"pet",os.listdir(os.path.join(output_path_mod,"pet"))[0])
         dicom_ct,_ = nrrd.read(path_ct)
         dicom_dose,_ = nrrd.read(path_dose)
         dicom_ctstr,_ = nrrd.read(path_ctstr)
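Every branch above repeats the same two-step lookup: join the output folder with a modality subfolder, then take the first file inside it. A small helper (hypothetical, not in the repo) would keep the os.path.join fix in one place; sorting also makes the choice of "first" deterministic, which os.listdir does not guarantee:

    import os

    def first_output(output_dir: str, subfolder: str) -> str:
        """Path to the first file in <output_dir>/<subfolder>, in sorted order."""
        folder = os.path.join(output_dir, subfolder)
        return os.path.join(folder, sorted(os.listdir(folder))[0])

    # e.g. path_ct = first_output(output_path_mod, "image")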
@@ -90,6 +91,6 @@ def test_pipeline(dataset_path,modalities):
     os.remove(crawl_path)
     os.remove(json_path)
     os.remove(edge_path)
-    shutil.rmtree(output_path_mod)
+    # shutil.rmtree(output_path_mod)
