From 5ffe7508b79fc7b830e67529b6d687aecdc866af Mon Sep 17 00:00:00 2001 From: Christoph Berger Date: Wed, 12 Dec 2018 10:59:54 +0100 Subject: [PATCH] Initial code commit with baseline files --- .gitignore | 1 + README.md | 44 ++++++ fusion/fusion.py | 327 ++++++++++++++++++++++++++++++++++++++++ segment.py | 206 +++++++++++++++++++++++++ util/__init__.py | 1 + util/clean.py | 28 ++++ util/filemanager.py | 205 +++++++++++++++++++++++++ util/own_itk.py | 270 +++++++++++++++++++++++++++++++++ util/score.py | 355 ++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 1437 insertions(+) create mode 100755 fusion/fusion.py create mode 100755 segment.py create mode 100755 util/__init__.py create mode 100755 util/clean.py create mode 100755 util/filemanager.py create mode 100755 util/own_itk.py create mode 100755 util/score.py diff --git a/.gitignore b/.gitignore index e7dfa09..88c23d9 100644 --- a/.gitignore +++ b/.gitignore @@ -105,3 +105,4 @@ venv.bak/ # mypy .mypy_cache/ +*.code-workspace diff --git a/README.md b/README.md index 3abcf87..c68ac11 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,46 @@ # brats-orchestra BraTS ensemble code based on the Docker images used in the BraTS Challenge 2018 + +##### Author: Christoph Berger +##### Version: 0.1 + +This code was part of my Bachelor`s thesis submitted at the Technical University of Munich in October 2018. The version published here is in the process of being adapted and built into a more general purpose segmentation pipeline to support more inputs and greater modularity. + +This code makes use of containers taken from the BraTS Algorithmic repository, which can be found here: https://github.com/BraTS/Instructions/ + +Further info regarding the BraTS challenge (rules, how to particpate, publications) can be found at the official homepage: https://www.med.upenn.edu/sbia/brats2018.html + +Some of the fusion results are pre-published in this summarizing manuscript: https://arxiv.org/abs/1811.02629 +Please contact me if you intend to use parts of this work for your research, we'd be thrilled to hear about it. + +Current functionality: +- `segment.py` is the front-end script to manage Docker containers for segmentation tasks and organises files to work with the containers +- `fusion/fusion.py` uses the resulting individual fusions to create a final result (using various methods) +- `util/` contains various scripts to manage files on the filesystem, calculate metrics for segmentation performance, load and store medical images and more + +### Usage of segment.py +``` +python3 segment.py /brats/dir/path/ +``` +`/brats/dir/path/` is the path where all subject folders are located, which must look like this: +- `/brats/dir/path/` + - `pat123/` + - `flair.nii.gz` + - `t1.nii.gz` + - `t2.nii.gz` + - `t1c.nii.gz` + - `pat456/` + - `...` + +And so on.. Resulting segmentations will be placed into `pat123/pat123__results/tumor__class.nii.gz` + +### Requirements +You need to have a working installation of Docker running on your system. 
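+A quick way to verify the Docker setup before running the pipeline (these commands are only an illustrative smoke test; any equivalent check works):
+
+```
+# check that the Docker daemon is reachable
+docker run hello-world
+# check that the Python Docker SDK used by segment.py can connect to it
+python3 -c "import docker; print(docker.from_env().ping())"
+```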
Also, install all other required packages for this code to run using: + +``` +pip install -r requirements.txt +``` + +### Current Tasks +- rebuild the configuration file system to use JSON and simplify I/O +- compatibility for automated GPU segmentation using Nvidia-Docker diff --git a/fusion/fusion.py b/fusion/fusion.py new file mode 100755 index 0000000..41e9607 --- /dev/null +++ b/fusion/fusion.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Author: Christoph Berger +# Script for the fusion of Brain Tumor Scans +# using the MICCAI BRATS algorithmic repository +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. + +import os +import scipy.io as spio +import numpy as np +import util.own_itk as oitk +import util.score as score +import util.filemanager as fmgnr +import errno +from pprint import pprint +from tqdm import tqdm + +import SimpleITK as sitk + +# global dimensions of segmentation volume +dim = (155,240,240) + +def staple(segmentations, proto): + estimate = np.zeros(dim, dtype=np.float64) + stapler = sitk.STAPLEImageFilter() + stapler.SetForegroundValue(1.0) + segs = [] + for _, s in segmentations.items(): + segs.append(oitk.make_itk_image(s, proto)) + estimate = stapler.Execute(segs) + return estimate + +def setWeights(weighted=False, method='wt', directory=0, verbose=False): + """ Receives a list of containers and the path to the + segmentation results, then matches the right result names + to the passed container names and creates a dict with the + proper weights for the fusion methods. + weights are determined based on the method used and the + dice score that is achieved by the containers on the training set + input: containers (list of container/algorithm-ids) + method (fusion method to be used wt, at or ct, default is wt) + directory (path to the segmentation results) + verbose (terminal output yes or no) + returns: dictionary containing the weights with the result folder + name pattern as key + + """ + # TODO look up weights by dice-score in external file + # static weights for testing + if weighted: + weights = { + 'istb_aj': 0.797171488, + 'brats_ac': 0.745408624, + 'changken1qtimlabbrats_final': 0.749367076, + 'ekrivovbrats2017_old': 0.748456509, + 'kamleshpbrats17': 0.70736689, + 'mikaelagnmagnrbm': 0.741016945, + 'sarassaras_test_brats_2017': 0.749367076, + 'gevaertlab': 0.74343376, + 'brats_dc': 0.7, + 'default': 0.7 + } + else: + weights = { + 'istb_aj': 1, + 'brats_ac': 1, + 'changken1qtimlabbrats_final': 1, + 'ekrivovbrats2017_old': 1, + 'kamleshpbrats17': 1, + 'mikaelagnmagnrbm': 1, + 'sarassaras_test_brats_2017': 1, + 'gevaertlab': 1, + 'qtimlab': 1, + 'brats_dc': 1, + 'isensee' : 1, + 'default': 1 + } + return weights + +def assembleFiles(directory, static_weights, t=0, verbose=True): + """ Takes a list of container names, looks for the segmentation + results and then loads each of them into a list of numpy arrays + input: filename (patient-id) + containers (list of container ids) + t (threshold for class border) + static_weights (weights for static majority voting - + should be 1 in first iteration of simple) + returns a list of numpy arrays + """ + # empty dicts for segmentations and weight, indeces are the algorithm + # IDs + segmentations = dict() + weights = dict() + i = 0 + #iterate through the directory + for subdirs in os.listdir(directory): + if not os.path.isdir(os.path.join(directory, subdirs)): + continue # Not a directory + #don't distinguish between 
upper- and lowercase + #if 'brats' in subdirs or 'Brats' in subdirs: + elif 'fusion' in subdirs: + continue + else: + files = os.listdir(os.path.join(directory, subdirs)) + subdir = os.path.join(directory, subdirs) + for file in files: + if 'nii' in file: + if(verbose): + print('Segmentation number: ', i) + # print(os.path.join(subdir, file)) + path = (os.path.abspath(os.path.join(subdir, file))) + if verbose: + print('Loading ITK image...') + temp = oitk.get_itk_image(path) + tmp = oitk.reduce_arr_dtype(oitk.get_itk_array(temp), False) + #binary file for whole tumor + result = np.zeros(tmp.shape) + if t == 1: + result[tmp == 1] = 1 + elif t == 0: + result[tmp > 0] = 1 + else: + result[tmp == 4] = 1 + if verbose: + print('BIN SEG MAX:', tmp.max()) + print('RESULT TMP MAX', result.max(), 'AND NONZERO Values', result.sum()) + #append matching weight TODO + key = os.path.splitext(file)[0] + print('Directory: ', os.path.splitext(subdir)[0]) + if verbose: + print('Key is: ', key) + #skip pointless segmentations (less than 500 voxels classified) + if result.sum() < 500: + continue + segmentations[key] = result + for d, value in static_weights.items(): + if d in subdirs: + weights[key] = value + if verbose: + print('Appended weight', static_weights[d], 'to segmentation', subdirs, 'with algo', d, 'and key', key) + i += 1 + pprint(weights) + return segmentations, weights + +def wtMajorityVote(segmentations, weights, verbose=True): + """ Performs a majority vote fusion of an arbitrary number of + segmentation results + Compound: votes are fused to either 0 or 1 label (no tumor vs tumor) + input: list of segmentations (numpy arrays) + return: fused result as a numpy array + """ + num = len(segmentations) + # manage empty calls + if num == 0: + print('ERROR! No segmentations to fuse.') + return np.zeros(dim) + if verbose: + print ('Number of segmentations to be fused using compound majority vote is: ', num) + # load first segmentation and use it to create initial numpy arrays + temp = next(iter(segmentations.values())) + label = np.zeros(temp.shape) + result = np.zeros(temp.shape) + #loop through all available segmentations and tally votes + for key, s in segmentations.items(): + # count every label greater than 0 = tumor + label[s > 0] += 1.0*weights[key] + if verbose: + print('Weight for segmentation', key, 'is:', weights[key]) + if verbose: + print('Tumor class > 0 Max and Min: ', label.max(), + label.min(), label.dtype) + # create result and label it where the majority agreed + num = sum(weights.values()) + if verbose: + print('sum of all weights: ', num) + result[label >= (num/2.0)] = 1 + if verbose: + print('Shape of result:', result.shape) + print('Shape of current input array:', temp.shape) + print('Labels and datatype of current input:', temp.max(), + temp.min(), temp.dtype) + return result + +def simple(segmentations, weights, t=0.05, stop=25, inc=0.07, verbose=True, + method='dice', iterations=25): + """ Implementation of the SIMPLE algorithm + Iterative weights estimation to find new weights for majority + voting + segmentations are discarded if the weights falls too far behind + the expert segmentation (if score < t*max_score) + Input: + segmentations: list of numpy arrays with segmentations + weights: initial weights for the first iteration + t: initial threshold value + stop: if the number of changed labels per + iterations falls below this number, the + algorithm stops -> convergence + inc: increment for t after every iteration + (linear: t = t+inc) + verbose: Test output: True/False + 
method: Scoring method for performance + estimation, for options see the + documentation for customScore in score.py + iterations: max number of iterations before the algorithm stops + to keep the runtime bounded if there is no + convergence + + Return: + estimate: Numpy label array with same shape as the input + = Fused Segmentation + """ + # baseline estimate + estimate = wtMajorityVote(segmentations, weights, verbose=verbose) + #initial convergence baseline + conv = np.sum(estimate) + i = 0 + for i in range(iterations): + if np.sum(estimate) == 0: + return np.zeros(dim) + if verbose: + print('Tau is now:', t) + for key, s in segmentations.items(): + # assign score to every segmentation + weights[key] = score.customScore(s, estimate, method) #cube scores for brutal enhancement + if verbose: + print('Calculated score:', weights[key]) + # calculate maximum score + max_phi = max(weights.values()) + entriesToRemove = [] + for key, w in weights.items(): + # check if a segmentation falls below the threshold + if(w < t*max_phi): + entriesToRemove.append(key) + if verbose: + print('Will remove segmentation and weights score' + + 'for element:', key) + for k in entriesToRemove: + segmentations.pop(k, None) + weights.pop(k, None) + if verbose: + print('[STATUS] Now calculating a new estimate in iteration', i+1) + estimate = wtMajorityVote(segmentations, weights, verbose=verbose) + t = t+inc + # check if it converges + if np.abs(conv-np.sum(estimate)) < stop: + break + conv = np.sum(estimate) + # when finished, return the result + return estimate + +def fuse(directory, method='simple', verbose=True): + """ Overall fusion class, used to start any fusion process provided here + Input parameters: + directory = directory containing patients and an arbitrary number + of segmentation niftis + method = simple for SIMPLE fusion (default) + majority for an unweighted majority voting + w_majority for a weighted majority voting + Output: None, saves results to new file in every patient directory + """ + # preprocess files to ensure functionality + fmgnr.conversion(directory, verbose) + # class borders between whole tumor, active tumor and tumor core + evalclasses = { + 'wt': 0, + 'tc': 1, + 'at': 2 + } + # class labels as stated in the paper + labels = { + 'wt': 2, #edema + 'tc': 1, #tumor core NCR/NET + 'at': 4 # enhancing tumor + } + w = False + if method == 'simple': + iter = 25 + elif method == 'w_majority': + iter = 0 + w = True + else: + iter = 0 + # set params depending on the method used + #loop through brats dir + for patient in tqdm(os.listdir(directory)): + if not os.path.isdir(os.path.join(directory, patient)): + continue # Not a directory + if 'brats' in patient: + patpath = os.path.join(directory, patient) + a_weights = setWeights(weighted=w, method=evalclasses['wt'], directory=directory, verbose=verbose) + # init array for result + res = np.zeros(dim) + #iterate through 3 tumor classes, then build final segmentation labels + for key, value in evalclasses.items(): + if verbose: + print('[INFO] Now fusing the following region: ', key) + segmentations, weights = assembleFiles(patpath, a_weights, t=value, verbose=verbose) + #set fixed weights + temp = simple(segmentations, weights, iterations=iter, method='dice') + res[temp == 1] = labels[key] + savepath = os.path.join(patpath, 'fusion') + try: + os.mkdir(savepath) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + pass + oitk.write_itk_image(oitk.make_itk_image(res), os.path.join(os.path.join(savepath, + method + 
'_fusion.nii.gz'))) + +def testStaple(): + directory = '/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/fusion_tests/brats2016_test_2013_patx116_1' + a_weights = setWeights(weighted=False, method=0, directory=0, verbose=True) + # load prototype images to get dimensions + proto = oitk.get_itk_image('/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/fusion_tests/brats2016_test_2013_patx116_1/brats2016_test_2013_patx116_1_romainsauvestregevaertlab/tumor_gevaertlab_class.nii') + segs, _ = assembleFiles(directory, a_weights, verbose=True) + #result = staple(segs, proto) + dictlist = [] + for value in segs.items(): + dictlist.append(value) + foregroundValue = 1 + threshold = 0.95 + reference_segmentation_STAPLE_probabilities = sitk.STAPLE(dictlist, foregroundValue) + reference_segmentation_STAPLE = reference_segmentation_STAPLE_probabilities > threshold + oitk.write_itk_image(oitk.make_itk_image(reference_segmentation_STAPLE, proto), os.path.join(os.path.join(directory, 'staple_fusion.nii.gz'))) diff --git a/segment.py b/segment.py new file mode 100755 index 0000000..a87367f --- /dev/null +++ b/segment.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Author: Christoph Berger +# Script for evaluation and bulk segmentation of Brain Tumor Scans +# using the MICCAI BRATS algorithmic repository +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. + +__version__ = '0.0' +__author__ = 'Christoph Berger' + +import os +import errno +import argparse +import configparser +import time +import datetime + +import util.filemanager as filemanager +import docker + +# parse arguments for input, output directories and flags +parser = argparse.ArgumentParser() +parser.add_argument('directory', help='Run all available containers with \ +this base directory', action='store') +# args = parser.parse_args() +# Arranging paths +root = os.path.abspath(parser.parse_args().directory) +# initialize docker +#client = docker.Client() +client = docker.from_env() +# Expected modalities +modalities = ['flair.nii', 't1.nii', 't1c.nii', 't2.nii'] +# Open file for timing data +try: + log = open(os.path.join(root, 'time.txt'), mode='a') +except (OSError, IOError) as e: + print('Opening log file failed: ' + str(e)) + +noOfContainers = 0 +noOfRuns = 0 + +def assert_files(directory): + """ Rudimentary checks to ensure a clean data set and the + availability of the 4 required modalities + """ + print('Looking for BRATS data directory..') + for fn in os.listdir(directory): + if not os.path.isdir(os.path.join(directory, fn)): + continue # Not a directory + if 'brats' in fn: + print('Found pat data:', fn) + print('Checking data validity now') + files = os.listdir(os.path.join(directory, fn)) + assert set(modalities).issubset(files),\ + 'Not all required files are present!' 
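+            # Note: this check only looks for the uncompressed modality names
+            # (flair.nii, t1.nii, t1c.nii, t2.nii); compressed .nii.gz copies
+            # are created separately by util.filemanager.create_files().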
+ print('File check okay!') + +def get_containers(config_file): + """ Loads the config file with a list of available containers + """ + global noOfContainers + config = configparser.ConfigParser() + config.read(config_file) + # processes the config details + for each_section in config.sections(): + img_id = config[each_section]['img_id'] + # load the container command, default is empty + command = config[each_section].get('command',' ') + print(command) + mount = config[each_section]['mountpoint'] + # distinguish between a batch container (iterates over all + # patients in the root directory by itself) and a standard + # container (needs to be called for each patient individually) + if config[each_section].getboolean('batch'): + print('Found batch container: ', img_id) + if config[each_section].getboolean('superresult'): + try: + os.makedirs(os.path.join(root, 'results')) + except OSError as err: + if err.errno != errno.EEXIST: + raise + # check if the FLAIR modality is expected as flair.nii* or + # as fla.nii* and rename the file accordingly + if not config[each_section].getboolean('flair'): + filemanager.rename_flair(root) + else: + filemanager.rename_fla(root) + # if the code can't cope with more than the 4 standard + # modalities in the directory, this is indicated with + if config[each_section].getboolean('clean'): + filemanager.remove_nii(root) + else: + filemanager.create_files(root) + # run container with the specified input files + run_container(img_id, command, root, mount) + noOfContainers += 1 + # TODO: MKDIR results for batch container + # the results folder is renamed to enable distinction + rename_folder(img_id, root, 'res_br') + else: + print('Found standard container: ', img_id) + if not config[each_section].getboolean('flair'): + filemanager.rename_flair(root) + else: + filemanager.rename_fla(root) + if config[each_section].getboolean('clean'): + filemanager.remove_nii(root) + else: + filemanager.create_files(root) + # the iterative run script is called + run_iterate(img_id, command, root, mount) + noOfContainers += 1 + print('Total containers run: ', noOfContainers) + +def rename_folder(name, folder, brats_id): + """ Renames the results folder to the name of the + docker container used to create it + """ + # the following chars are removed to inhibit subdirectories + chars_to_remove = ['/','.','\\','+'] + # create dictionary of the chars to remove for the translate method + dd = {ord(c):None for c in chars_to_remove} + name = brats_id + '_' + name.translate(dd) + os.renames(os.path.join(folder, 'results'), + os.path.join(folder, name)) + +def run_container(img_id, commands, folder, mount): + """ + Runs the specified image using the docker api + with the passed commands and a mount of the folder + """ + # assemble mountpoint + global noOfRuns + noOfRuns += 1 + mountpoint = folder+':'+mount + print(mountpoint) + print('Starting execution now..') + # start timing of execution + start = time.time() + # assemble a new container using the docker api + container_id = client.create_container(img_id, command=commands, + stdin_open=True, tty=True, volumes=[folder], + host_config=client.create_host_config(binds=[mountpoint,]), + detach=False) + try: + container_id = client.containers.run(img_id, command=commands, volumes=[folder], + host_config=client.create_host_config(binds=[mountpoint,]), runtime=nvidia) + # client.start(container_id, runtime=nvidia) + except docker.errors.APIError as fail: + print(fail) + # wait for container to stop + client.wait(container_id) + try: + # 
should the container not stop in time, it is manually stopped + client.stop(container_id) + except docker.errors.APIError as fail: + print(fail) + end = time.time() + # the container (not needed anymore) is removed to clean the system + client.remove_container(container_id) + print('Container run successful') + print('This run took', (end-start), 'seconds.') + print('------------------- Total runs: ', noOfRuns,' ----------------') + # write the relevant parameters to the logfile + logstr = str('TIMESTAMP: ' + str(datetime.datetime.now()) + + '\nContainer:' +str(img_id)+'\nPatient '+ + str(folder)+'\nTime: '+str(end-start)+'\n\n') + log.write(logstr) + +def run_iterate(img_id, command, directory, mount): + """ Iterates through a given directory with subdirectories + containing the four needed modalities and sends each matching + subdirectory to the run_container function + """ + print('Looking for BRATS data directories..') + for fn in os.listdir(directory): + if not os.path.isdir(os.path.join(directory, fn)): + continue # Not a directory + if 'brats' in fn: + print('Found pat data: ', fn) + try: + os.makedirs(os.path.join(os.path.join(directory, fn), + 'results')) + except OSError as err: + if err.errno != errno.EEXIST: + raise + print('Calling Container: ', img_id) + run_container(img_id, command, os.path.join(directory, fn), + mount) + #rename folder and prepend pat_id + rename_folder(img_id, os.path.join(directory, fn), fn) + +#clean the entire input directory +#filemanager.completeclean(root) +# assert_files(root) +#filemanager.create_files(root, True) +# get the containers and start the segmentation +get_containers('config.txt') +# final cleanup +filemanager.rename_fla(root) +log.write('---------------Script run finished.----------------\n\n') +log.close() +# client.prune_containers() # Non-functional with new Docker version? +# client.prune_volumes() diff --git a/util/__init__.py b/util/__init__.py new file mode 100755 index 0000000..8b13789 --- /dev/null +++ b/util/__init__.py @@ -0,0 +1 @@ + diff --git a/util/clean.py b/util/clean.py new file mode 100755 index 0000000..f14deca --- /dev/null +++ b/util/clean.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" Cleans the specified directory (removes all nii.gz files, results and +subfolders) +""" +# Author: Christoph Berger +# Script for evaluation and bulk segmentation of Brain Tumor Scans +# using the MICCAI BRATS algorithmic repository +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. 
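+# Example invocation (illustrative): python3 util/clean.py /brats/dir/path/
+# With the completeclean() call at the bottom of this script, this removes the
+# result subfolders inside each subject directory and leaves the .nii/.nii.gz
+# inputs in place.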
+ +__version__ = '0.0' +__author__ = 'Christoph Berger' + +import os +import argparse +import filemanager as filemanager + +# parse arguments for input, output directories and flags +parser = argparse.ArgumentParser() +parser.add_argument('directory', help='Clean everything in this directory', action='store') +# args = parser.parse_args() +# Arranging paths +root = os.path.abspath(parser.parse_args().directory) + +#clean everything +filemanager.completeclean(root) diff --git a/util/filemanager.py b/util/filemanager.py new file mode 100755 index 0000000..56e8653 --- /dev/null +++ b/util/filemanager.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +"""Module containing functions to manage the BRATS files + +""" +# Author: Christoph Berger +# Script for evaluation and bulk segmentation of Brain Tumor Scans +# using the MICCAI BRATS algorithmic repository +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. + +import os +import numpy as np +import glob +import shutil +import fnmatch +import util.own_itk as oitk + +modalities = ['fla','t1','t1c','t2'] + +def loadGT(path, patid, file='gt.nii.gz', verbose=True): + """ Loads the Ground Truth for a specified patient + from a given Ground Truth root directory + In: dir, path to the GT folder + patid, patient ID + verbose: True/False terminal output + Out: itk image! -> convert to numpy array + """ + for directory in os.listdir(path): + if not os.path.isdir(os.path.join(path, directory)): + continue # Not a directory + if patid == directory: + if verbose: + print('Loading GT for Patient', patid, 'now..') + patpath = os.path.join(path, patid) + #TODO change loading to support GT for different methods + groundtruth = oitk.get_itk_image(os.path.join(patpath, file)) + break + return groundtruth + +def convertLabels(originalFile, oldlabels, newlabels=[0,1,2,4]): + proto_img = oitk.get_itk_image(originalFile) + labelfile = oitk.get_itk_array(proto_img) + # segm_im = oitk.make_itk_image(proto_image, proto_image) + converted = np.zeros(labelfile.shape) + for oldlabel, newlabel in zip(oldlabels, newlabels): + converted[labelfile == oldlabel] = newlabel + oitk.write_itk_image(oitk.make_itk_image(converted, proto_img), originalFile) + +def fileFinder(srcPath, filetofind, func=convertLabels, verbose=True): + """ finds a file starting from the source path in subdirectories + and runs an arbitrary function on them + """ + if verbose: + print(srcPath) + print(filetofind) + for filename in glob.iglob(srcPath+'/**/'+filetofind, recursive=True): + func(filename, [0,1,2,4], [0,2,1,4]) + +def touchAndConvert(originalFile, gt, verbose=True): + """ Loads the ITK image and saves it with proper + header data (and conversion to 8-bit unint) + """ + proto_img = oitk.get_itk_image(originalFile) + labelfile = oitk.get_itk_array(proto_img) + segm_img = oitk.make_itk_image(labelfile, gt) + oitk.write_itk_image(segm_img, originalFile) + +def fileIterator(directory, gt_root, verbose=True): + for patient in os.listdir(directory): + patpath = os.path.join(directory, patient) + if not os.path.isdir(patpath): + continue # Not a directory + if 'brats' in patient: + #loads itk ground truth + gt = loadGT(gt_root, patient, file='gt.nii') + if verbose: + print ('Current patient:', patient) + # loop through patient folder + for result in os.listdir(patpath): + if not os.path.isdir(os.path.join(patpath, result)): + continue # Not a directory + respath = os.path.join(patpath, result) + paths = 
os.listdir(respath) + for result in paths: + # if there is a results file, run the conversion + if fnmatch.fnmatch(result, '*.nii*'): + if verbose: + print('Will convert the following file:', result) + touchAndConvert(os.path.join(respath, result), gt, True) + +def remove_nii(root): + for fn in os.listdir(root): + if not os.path.isdir(os.path.join(root, fn)): + continue # Not a directory + if 'brats' in fn: + files = os.listdir(os.path.join(root, fn)) + subdir = os.path.join(root, fn) + for file in files: + if file+'.nii' in modalities: + os.remove(os.path.join(subdir, file+'.nii')) + +def create_files(root, gz=False): + # create nii.gz versions from nii for compatibility + print(root) + for fn in os.listdir(root): + if not os.path.isdir(os.path.join(root, fn)): + continue # Not a directory + if 'brats' in fn: + # files = os.listdir(os.path.join(root, fn)) + for file in modalities: + path = os.path.join(os.path.join(root, fn), file) + proto_image = oitk.get_itk_image(path+str('.nii')) + # segm_im = oitk.make_itk_image(proto_image, proto_image) + oitk.write_itk_image(proto_image, path+str('.nii.gz')) + +def clean(root, gz=False, dir=False): + """ Removes all subfolders and leaves only .nii and .nii.gz input + files untouched + root: path to folder with subfolers + gz: If True, compressed Nifti files are also removed + """ + # Remove subfolders + for fn in os.listdir(root): + subdir = os.path.join(root, fn) + if not os.path.isdir(subdir): + continue + for file in os.listdir(subdir): + if dir and os.path.isdir(os.path.join(subdir, file)): + shutil.rmtree(os.path.join(subdir, file)) + if gz and '.nii.gz' in file: + os.remove(os.path.join(subdir, file)) + +def validate_files(root): + """ Checks if all input directories contain the right files + """ + print('Looking for BRATS data directory..') + for fn in os.listdir(root): + if not os.path.isdir(os.path.join(root, fn)): + continue # Not a directory + if 'brats' in fn: + print('Found pat data:', fn) + print('Checking data validity now') + files = os.listdir(os.path.join(root, fn)) + if not set(modalities).issubset(files): + print('Not all required files are present!') + return False + print('File check okay!') + return True + +def rename_flair(root): + """ Renames flair.nii files to fla.nii if required + """ + for fn in os.listdir(root): + if not os.path.isdir(os.path.join(root, fn)): + continue # Not a directory + if 'brats' in fn: + files = os.listdir(os.path.join(root, fn)) + subdir = os.path.join(root, fn) + for file in files: + if 'flair' in file: + os.rename(os.path.join(subdir, file), + os.path.join(subdir, file.replace('flair', 'fla'))) + +def rename_fla(root): + """ Renames fla.nii files to flair.nii if required + """ + for fn in os.listdir(root): + if not os.path.isdir(os.path.join(root, fn)): + continue # Not a directory + if 'brats' in fn: + files = os.listdir(os.path.join(root, fn)) + subdir = os.path.join(root, fn) + for file in files: + if 'fla.nii' == file: + os.rename(os.path.join(subdir, file), + os.path.join(subdir, + file.replace('fla.nii', 'flair.nii'))) + if 'fla.nii.gz' == file: + os.rename(os.path.join(subdir, file), + os.path.join(subdir, + file.replace('fla.nii.gz', 'flair.nii.gz'))) + +def reduce_filesize(root, gz=False): + # create nii.gz versions from nii for compatibility + for fn in os.listdir(root): + if not os.path.isdir(os.path.join(root, fn)): + continue # Not a directory + if 'brats' in fn: + # files = os.listdir(os.path.join(root, fn)) + for file in modalities: + path = os.path.join(os.path.join(root, fn), 
file) + proto_image = oitk.get_itk_image(path+str('.nii')) + # segm_im = oitk.make_itk_image(proto_image, proto_image) + oitk.write_itk_image(proto_image, path+str('.nii.gz')) + +def completeclean(root): + # maybe remove the root-results folder as well + clean(root, False, True) + +def conversion(segmentations, verbose=True): + gt_root = '/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/testing_nii_LABELS' + #segmentations = '/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/Complete_Results' + fileIterator(segmentations, gt_root) diff --git a/util/own_itk.py b/util/own_itk.py new file mode 100755 index 0000000..7993198 --- /dev/null +++ b/util/own_itk.py @@ -0,0 +1,270 @@ +# -*- coding: UTF-8 -*- +"""Module containing functions enabling to read, make and +write ITK images. + +""" +__version__ = '0.2' +__author__ = 'Esther Alberts' + +import os +import numpy as np +import SimpleITK as itk + +def reduce_arr_dtype(arr, verbose=False): + """ Change arr.dtype to a more memory-efficient dtype, without changing + any element in arr. """ + + if np.all(arr-np.asarray(arr,'uint8') == 0): + if arr.dtype != 'uint8': + if verbose: + print('Converting '+str(arr.dtype)+' to uint8 np.ndarray') + arr = np.asarray(arr, dtype='uint8') + elif np.all(arr-np.asarray(arr,'int8') == 0): + if arr.dtype != 'int8': + if verbose: + print('Converting '+str(arr.dtype)+' to int8 np.ndarray') + arr = np.asarray(arr, dtype='int8') + elif np.all(arr-np.asarray(arr,'uint16') == 0): + if arr.dtype != 'uint16': + if verbose: + print('Converting '+str(arr.dtype)+' to uint16 np.ndarray') + arr = np.asarray(arr, dtype='uint16') + elif np.all(arr-np.asarray(arr,'int16') == 0): + if arr.dtype != 'int16': + if verbose: + print('Converting '+str(arr.dtype)+' to int16 np.ndarray') + arr = np.asarray(arr, dtype='int16') + + return arr + +def make_itk_image(arr, proto_image=None, verbose=True): + """Create an itk image given an image array. + + Parameters + ---------- + arr : ndarray + Array to create an itk image with. + proto_image : itk image, optional + Proto itk image to provide Origin, Spacing and Direction. + + Returns + ------- + image : itk image + The itk image containing the input array `arr`. + + """ + + arr = reduce_arr_dtype(arr, verbose=verbose) + + image = itk.GetImageFromArray(arr) + if proto_image != None: + image.CopyInformation(proto_image) + + return image + +def write_itk_image(image, path): + """Write an itk image to a path. + + Parameters + ---------- + image : itk image or np.ndarray + Image to be written. + path : str + Path where the image should be written to. + + """ + + if isinstance(image, np.ndarray): + image = make_itk_image(image) + + writer = itk.ImageFileWriter() + writer.SetFileName(path) + + if os.path.splitext(path)[1] == '.nii': + Warning('You are converting nii, ' + \ + 'be careful with type conversions') + + writer.Execute(image) + +def get_itk_image(path_or_image): + """Get an itk image given a path. + + Parameters + ---------- + path : str or itk.Image + Path pointing to an image file with extension among + *TIFF, JPEG, PNG, BMP, DICOM, GIPL, Bio-Rad, LSM, Nifti (.nii and .nii.gz), + Analyze, SDT/SPR (Stimulate), Nrrd or VTK images*. + + Returns + ------- + image : itk image + The itk image. 
+ + """ + if isinstance(path_or_image, itk.Image): + return path_or_image + + if not os.path.exists(path_or_image): + err = path_or_image + ' doesnt exist' + raise AttributeError(err) + + reader = itk.ImageFileReader() + reader.SetFileName(path_or_image) + + image = reader.Execute() + + return image + +def get_itk_array(path_or_image): + """ Get an image array given a path or itk image. + + Parameters + ---------- + path_or_image : str or itk image + Path pointing to an image file with extension among + *TIFF, JPEG, PNG, BMP, DICOM, GIPL, Bio-Rad, LSM, Nifti (.nii and .nii.gz), + Analyze, SDT/SPR (Stimulate), Nrrd or VTK images* or an itk image. + + Returns + ------- + arr : ndarray + Image ndarray contained in the given path or the itk image. + + """ + + if isinstance(path_or_image, np.ndarray): + return path_or_image + + elif isinstance(path_or_image, str): + image = get_itk_image(path_or_image) + + elif isinstance(path_or_image, itk.Image): + image = path_or_image + + else: + err = 'Image type not recognized: ' + str(type(path_or_image)) + raise RuntimeError(err) + + arr = itk.GetArrayFromImage(image) + + return arr + +def copy_image_info(input_path, ref_path): + """ Copy origin, direction and spacing information from ref_path + into the header in input_path. """ + + print('OVerwriting '+input_path[-50:]) + + ref_im = get_itk_image(ref_path) + im = get_itk_image(input_path) + + dim = im.GetSize() + if dim != ref_im.GetSize(): + err = 'Images are not of same dimension, I will not copy image info!' + raise RuntimeError(err) + + im.SetOrigin(ref_im.GetOrigin()) + im.SetDirection(ref_im.GetDirection()) + im.SetSpacing(ref_im.GetSpacing()) + + if im.GetSize() != dim: + err = 'Dimension changed during copying image info: aborting' + raise RuntimeError(err) + + write_itk_image(im, input_path) + +def load_arr_from_paths(paths): + """ For every str in paths (paths can consis of nested lists), + load the image at this path. If any str is not a path, an error + is thrown. All other objects are preserved. """ + + if isinstance(paths, str): + im_arrs = get_itk_array(paths) + elif isinstance(paths, (list, tuple)): + for i, sub_paths in enumerate(paths): + paths[i] = load_arr_from_paths(sub_paths) + im_arrs = paths + else: + im_arrs = paths + + return im_arrs + +def get_itk_data(path_or_image, verbose=False): + """Get the image array, image size and pixel dimensions given an itk + image or a path. + + Parameters + ---------- + path_or_image : str or itk image + Path pointing to an image file with extension among + *TIFF, JPEG, PNG, BMP, DICOM, GIPL, Bio-Rad, LSM, Nifti (.nii and .nii.gz), + Analyze, SDT/SPR (Stimulate), Nrrd or VTK images* or an itk image. + verbose : boolean, optional + If true, print image shape, spacing and data type of the image + corresponding to `path_or_image.` + + Returns + ------- + arr : ndarray + Image array contained in the given path or the itk image. + shape : tuple + Shape of the image array contained in the given path or the itk + image. + spacing : tuple + Pixel spacing (resolution) of the image array contained in the + given path or the itk image. 
+ + """ + + if isinstance(path_or_image, np.ndarray): + arr = path_or_image + spacing = None + else: + if isinstance(path_or_image, str): + image = get_itk_image(path_or_image) + else: + image = path_or_image + arr = itk.GetArrayFromImage(image) + spacing = image.GetSpacing()[::-1] + + shape = arr.shape + data_type = arr.dtype + + if verbose: + + print('\t image shape: ' + str(shape)) + print('\t image spacing: ' + str(spacing)) + print('\t image data type: ' + str(data_type)) + + return arr, shape, spacing + +def read_dicom(source_path, verbose=True): + '''Reads dicom series into an itk image. + + Parameters + ---------- + source_path : string + path to directory containing dicom series. + verbose : boolean + print out all series file names. + + Returns + ------- + image : itk image + image volume. + ''' + + reader = itk.ImageSeriesReader() + names = reader.GetGDCMSeriesFileNames(source_path) + if len(names) < 1: + raise IOError('No Series can be found at the specified path!') + elif verbose: + print('image series with %d dicom files found in : %s' \ + % (len(names), source_path[-50:])) + reader.SetFileNames(names) + image = reader.Execute() + if verbose: + get_itk_data(image, verbose=True) + + return image diff --git a/util/score.py b/util/score.py new file mode 100755 index 0000000..7ca1dfc --- /dev/null +++ b/util/score.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Author: Christoph Berger +# Script for evaluation and bulk segmentation of Brain Tumor Scans +# using the MICCAI BRATS algorithmic repository +# +# Please refer to README.md and LICENSE.md for further documentation +# This software is not certified for clinical use. + +import os +import scipy.io as spio +import numpy as np +import csv +import fnmatch +from natsort import natsorted +import collections +import sys +import math +import pandas +import time +import pickle +from tqdm import tqdm + +import util.filemanager as fmg +import util.own_itk as oitk +# import matlab.engine + +#for testing only +root = '/Users/christoph/Documents/University/Uni/Bachelorarbeit/Testdaten/Complete_Results' +gt_root = '/Users/christoph/Documents/University/Uni/Bachelorarbeit/Testdaten/testing_nii_LABELS' + +#dictionary for thresholds depending on the scoring method +thresh = { + 'wt': 0, + 'tc': 1, + 'at': 2 +} + +def customScore(seg, gt, method='dice'): + """ Calculates a similarity score based on the + method specified in the parameters + Input: Numpy arrays to be compared, need to have the + same dimensions (shape) + Default scoring method: DICE coefficient + method may be: 'dice' + 'auc' + 'bdice' + returns: a score [0,1], 1 for identical inputs + """ + try: + # True Positive (TP): we predict a label of 1 (positive) and the true label is 1. + TP = np.sum(np.logical_and(seg == 1, gt == 1)) + # True Negative (TN): we predict a label of 0 (negative) and the true label is 0. + TN = np.sum(np.logical_and(seg == 0, gt == 0)) + # False Positive (FP): we predict a label of 1 (positive), but the true label is 0. + FP = np.sum(np.logical_and(seg == 1, gt == 0)) + # False Negative (FN): we predict a label of 0 (negative), but the true label is 1. + FN = np.sum(np.logical_and(seg == 0, gt == 1)) + FPR = FP/(FP+TN) + FNR = FN/(FN+TP) + TPR = TP/(TP+FN) + TNR = TN/(TN+FP) + except ValueError: + print('Value error encountered!') + return 0 + # faster dice? Oh yeah! 
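+    # Worked example (illustration only, not from the original code): for
+    # seg = [1, 1, 0, 0] and gt = [1, 0, 1, 0] the counts are
+    # TP = 1, TN = 1, FP = 1, FN = 1, so
+    #   dice  = 2*1 / (2*1 + 1 + 1)   = 0.5
+    #   auc   = 1 - (FPR + FNR)/2     = 1 - (0.5 + 0.5)/2 = 0.5
+    #   bdice = 2*1 / (2*1 + 1)       = 2/3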
+ if method is 'dice': + # default dice score + score = 2*TP/(2*TP+FP+FN) + elif method is 'auc': + # AUC scoring + score = 1 - (FPR+FNR)/2 + elif method is 'bdice': + # biased dice towards false negatives + score = 2*TP/(2*TP+FN) + elif method is 'spec': + #specificity + score = TN/(TN+FP) + elif method is 'sens': + # sensitivity + score = TP/(TP+FN) + elif method is 'toterr': + score = (FN+FP)/(155*240*240) + elif method is 'ppv': + prev = np.sum(gt)/(155*240*240) + temp = TPR*prev + score = (temp)/(temp + (1-TNR)*(1-prev)) + else: + score = 0 + if np.isnan(score) or math.isnan(score): + score = 0 + return score + +def surfaceMask(gt, r, verbose=True): + """ + Create a mask for the surface region for a given + circumference around the border of the volume + Example: r=1 + Volume: Mask (True/False) + 0 0 0 0 0 = F T F F F + 0 1 0 0 0 = T T T F F + 0 1 1 0 0 = T T T T F + 0 0 0 0 0 = F T T F F + """ + # create a mask with the same shape as the input array + mask = np.array(gt.shape(), dtype=np.bool) + + return mask +def surfaceDice(gt, seg, mask, verbose=True): + try: + # True Positive (TP): we predict a label of 1 (positive) and the true label is 1. + TP = np.sum(np.logical_and(np.logical_and(seg == 1, gt == 1), mask)) + # True Negative (TN): we predict a label of 0 (negative) and the true label is 0. + TN = np.sum(np.logical_and(np.logical_and(seg == 0, gt == 0), mask)) + # False Positive (FP): we predict a label of 1 (positive), but the true label is 0. + FP = np.sum(np.logical_and(np.logical_and(seg == 1, gt == 0), mask)) + # False Negative (FN): we predict a label of 0 (negative), but the true label is 1. + FN = np.sum(np.logical_and(np.logical_and(seg == 0, gt == 1), mask)) + FPR = FP/(FP+TN) + FNR = FN/(FN+TP) + TPR = TP/(TP+FN) + TNR = TN/(TN+FP) + except ValueError: + print('Value error encountered!') + return 0 + # return DICE score for the surface region only + return (2*TP/(2*TP+FP+FN)) + +def interRaterScore(root, score=None, method='at', verbose=True): + """ Calculates and stores the DICE scores for all patients and + segmentations found in the passed directory + Uses a ground truth file with 4 labels [0,1,2,4] which is expected + to be named _gt.nii.gz and in the patient directory + (e.g. 
root/pat123/wt_gt.nii.gz) + Binary scoring: whole tumor (wt): label 0 and labels > 0 + tumor core (tc): label 0 and labels > 1 + active tumor (at): label 0 and labels > 2 + Scoring method is specified via method parameter + In: root, path to segmentations and GT + method, scoring method to use, see above + verbose, output to the CLI or not + Out: Nothing + """ + + candidates = { + 'aju' : 'tumor_istb_aj_class.nii', + 'aca' : 'data_data_prediction.nii.gz', + 'kch' : 'tumor_qtimlab_class.nii.gz', + 'ekr' : 'tumor_00000000_class.nii.gz', + 'aka' : 'tumor_kamleshp_class.nii.gz', + 'mag' : 'tumor_magnrbm_class.nii', + 'sse' : 'tumor_saras_tb_class.nii.gz', + 'rsa': 'tumor_gevaertlab_class.nii', + 'gwa' : 'brats_dc_brats2016_test_klhd_pat101_3.nii.gz', + 'ise' : 'tumor_brats2017_isensee_class.nii.gz', + 'mav' : 'majvote_fusion.nii.gz', + 'sim' : 'simple_fusion.nii.gz', + 'sim2': 'simple2_fusion.nii.gz', + 'none' : 'default' + } + # preallocate dictionary space + scores = {} + # create indices for dataframes + rows = list(candidates.keys()) + i = 0 + for patient in tqdm(os.listdir(root)): + if not os.path.isdir(os.path.join(root, patient)): + continue # Not a directory + if 'brats' in patient: + # create new dataframe + patpath = os.path.join(root, patient) + df = pandas.DataFrame(data=None, index=os.listdir(patpath), columns=os.listdir(patpath), dtype=float) + #print('Current Patient: ', patient) + # loop through patient folder + for gt_folder in os.listdir(patpath): + #print('Current GT Folder: ', gt_folder) + # load arbitary ground truth file + respath = os.path.join(patpath, gt_folder) + paths = os.listdir(respath) + for gt_cand in paths: + if fnmatch.fnmatch(gt_cand, '*.nii*'): + #print('GT is now: ', gt_cand) + gt_temp = oitk.get_itk_array(oitk.get_itk_image(os.path.join(respath, gt_cand))) + gt = np.zeros(gt_temp.shape) + #assemble final gt array + if method == 'tc': + #tumor core: may be labels 1,3,4 + gt[gt_temp > 0] = 1 + gt[gt_temp == 2] = 0 + elif method == 'wt': + # whole tumor: labels 1,2,3,4 in participant's segmentation + gt[gt_temp > 0] = 1 + else: + #active tumor + gt[gt_temp == 4] = 1 + for subfolder in os.listdir(patpath): + respath = os.path.join(patpath, subfolder) + paths = os.listdir(respath) + paths = natsorted(paths, key=lambda y: y.lower()) + for result in paths: + # if there is a results file, load it and compute the dice score + if fnmatch.fnmatch(result, '*.nii*'): + temp = oitk.get_itk_array(oitk.get_itk_image(os.path.join(respath, result))) + pred = np.zeros(temp.shape) + if method == 'tc': + #tumor core: may be labels 1,3,4 + pred[temp > 0] = 1 + pred[temp == 2] = 0 + elif method == 'wt': + # whole tumor: labels 1,2,3,4 in participant's segmentation + pred[temp > 0] = 1 + else: + #active tumor + pred[temp == 4] = 1 + i = i+1 + #gt_cand = gt_cand.translate({ord(i):None for i in '!/._-'}) + #result = result.translate({ord(i):None for i in '!/._-'}) + df[gt_folder][subfolder] = customScore(gt, pred, method='dice') + scores[patient] = df + + with open('results.pkl', 'wb') as f: + pickle.dump(scores, f, pickle.HIGHEST_PROTOCOL) + + """ Master Plan: + + for each patient: + for each result: + for each other file: + getScore + df[current-gt][current-seg] = score + dict['patient'] = df + """ + +def getScore(root, gt_root, score=None, file='gt.nii.gz', method='wt', verbose=True): + """ Calculates and stores the DICE scores for all patients and + segmentations found in the passed directory + Uses a ground truth file with 4 labels [0,1,2,4] which is expected + to be named 
_gt.nii.gz and in the patient directory + (e.g. root/pat123/wt_gt.nii.gz) + Binary scoring: whole tumor (wt): label 0 and labels > 0 + tumor core (tc): label 0 and labels > 1 + active tumor (at): label 0 and labels > 2 + Scoring method is specified via method parameter + In: root, path to segmentations and GT + method, scoring method to use, see above + verbose, output to the CLI or not + Out: Nothing + """ + # load thresh, default is again 'wt' = 0 + t = thresh.get(method, 0) + scores = dict() + root = os.path.abspath(root) + #open file for results + try: + f = open(os.path.join(root, method + '_scores.csv'), 'a') + except IOError: + print('Could not read file:' , method + '_scores.csv') + sys.exit() + + for patient in os.listdir(root): + if not os.path.isdir(os.path.join(root, patient)): + continue # Not a directory + if 'brats' in patient: + patpath = os.path.join(root, patient) + #TODO change loading to support GT for different methods + gt_temp = oitk.get_itk_array(fmg.loadGT(gt_root, patient, file=file)) + gt = np.zeros(gt_temp.shape) + if t == 1: + # tumor core: labels 3 and 4 + gt[gt_temp > 2] = 1 + elif t == 0: + #tissue is 1 in GT + gt[gt_temp > 1] = 1 + else: + # active tumor label 4 + gt[gt_temp == 4] = 1 + if verbose: + print ('Sanity check of dimensions:', gt.shape) + print ('Current patient:', patient) + print ('GT max/min:', gt.max(), gt.min()) + #print ('Tumor Voxels: ', gt.sum()) + # init empty dictionary dimension + scores[patient] = collections.OrderedDict() + # loop through patient folder + for result in os.listdir(patpath): + if not os.path.isdir(os.path.join(patpath, result)): + continue # Not a directory + # sort pathlist to get a pretty csv file + respath = os.path.join(patpath, result) + paths = os.listdir(respath) + paths = natsorted(paths, key=lambda y: y.lower()) + for result in paths: + # if there is a results file, load it and compute the dice score + if fnmatch.fnmatch(result, '*.nii*'): + temp = oitk.get_itk_array(oitk.get_itk_image(os.path.join(respath, result))) + pred = np.zeros(temp.shape) + if t == 1: + #tumor core: may be labels 1,3,4 + pred[temp > 0] = 1 + pred[temp == 2] = 0 + elif t == 0: + # whole tumor: labels 1,2,3,4 in participant's segmentation + pred[temp > 0] = 1 + else: + #active tumor + pred[temp == 4] = 1 + if verbose: + pass + # print ('Segmentation max/min:', pred.max(), pred.min()) + dice = customScore(pred, gt, score) + # save dice score to dictionary + scores[patient][result] = dice + if verbose: + print('DICE Score for', result, 'is', dice) + if verbose: + print('Current Scores:\n', scores[patient]) + #save scores to csv with proper prefix + scores[patient] = collections.OrderedDict(sorted(scores[patient].items(), key=lambda t: t[0])) + scores[patient]['patient'] = patient + # with open(os.path.join(root, method + '_dice.csv'), 'a') as f: + w = csv.DictWriter(f, scores[patient].keys()) + # if os.stat(os.path.join(root, method + '_dice.csv')).st_size == 0: + w.writeheader() + w.writerow(scores[patient]) + if verbose: + print (scores) + f.close() + +""" +def gtConverter(path='/Users/christoph/Documents/University/Uni/Bachelorarbeit/Testdaten/gt_scoring', verbose=True): + # exported to MATLAB + eng = matlab.engine.start_matlab() + eng.cd('nifti_conversion/') + ret = eng.convert_nii(path, nargout=1) + if verbose: + print('Number or files converted:', ret) +""" +def fullScore(s_dir='/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/Complete_Results', 
gt_dir='/Users/christoph/Documents/Uni/Bachelorarbeit/Testdaten/testing_nii_LABELS'): + getScore(s_dir, gt_dir, score='ppv', file='gt.nii', method='wt', verbose=True) + getScore(s_dir, gt_dir, score='ppv', file='gt.nii', method='at', verbose=True) + getScore(s_dir, gt_dir, score='ppv', file='gt.nii', method='tc', verbose=True) + +def tests(): + """ + Simple tests to ensure correct scoring performance + """ + test = np.array([[1,0,0,1],[1,1,0,0],[1,0,1,1],[0,0,0,0]]) + truth = np.array([[1,0,0,1],[1,0,0,1],[1,0,0,1],[1,0,0,1]]) + assert (2/3) == customScore(test, truth, method='dice') + assert 0.75 == customScore(test, truth, method='spec') + assert 0.625 == customScore(test, truth, method='sens') + pred1 = np.array([[[1,0,0,1],[1,0,0,1],[1,0,0,1],[1,0,0,1]],[[1,0,0,1],[1,0,0,1],[1,0,0,1],[1,0,0,3]],[[1,0,0,1],[1,0,0,1],[1,0,0,1],[1,0,0,1]],[[1,0,0,1],[1,0,0,1],[1,0,0,1],[1,0,0,1]]]) + pred2 = np.array([[[0,1,1,0],[1,1,1,1],[1,1,0,1],[1,0,0,1]],[[1,0,0,1],[1,2,2,1],[0,3,1,1],[1,1,1,1]],[[0,0,0,0],[1,0,0,1],[1,0,0,1],[1,1,0,1]],[[1,1,0,1],[1,1,0,1],[1,1,0,1],[1,1,0,1]]]) + print('DICE Pred1 vs Pred2: ', customScore(pred1, pred2, method='dice')) + print('DICE Pred2 vs Pred1: ', customScore(pred2, pred1, method='dice')) \ No newline at end of file
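
For orientation, the fusion step that `fusion/fusion.py` builds on is a weighted majority vote. The sketch below is illustrative only (the function name and toy arrays are not taken from the patch); `simple()` wraps this same vote in an iteration that re-scores every segmentation against the current estimate and drops those falling below `t * max_score` before voting again.

```
# Minimal sketch of the weighted majority vote used in fusion/fusion.py:
# each binary segmentation votes with its weight, and a voxel becomes
# foreground once the accumulated vote reaches half of the total weight.
import numpy as np

def weighted_majority_vote(segmentations, weights):
    """segmentations: dict of key -> binary ndarray; weights: dict of key -> float."""
    if not segmentations:
        raise ValueError('no segmentations to fuse')
    first = next(iter(segmentations.values()))
    votes = np.zeros(first.shape, dtype=np.float64)
    for key, seg in segmentations.items():
        votes[seg > 0] += weights[key]            # tally weighted votes per voxel
    fused = np.zeros(first.shape, dtype=np.uint8)
    fused[votes >= sum(weights.values()) / 2.0] = 1
    return fused

# toy 1-D example: only the first voxel reaches half of the total weight
segs = {'a': np.array([1, 1, 0, 0]),
        'b': np.array([1, 0, 1, 0]),
        'c': np.array([1, 0, 0, 0])}
print(weighted_majority_vote(segs, {'a': 1.0, 'b': 1.0, 'c': 1.0}))  # -> [1 0 0 0]
```

With uniform weights the threshold `sum(weights) / 2` reduces this to a plain per-voxel majority vote, which is what `fuse(..., method='majority')` falls back to when it runs `simple()` with zero iterations.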