diff --git a/examples/easy_start/nipype_preproc_spm_auditory.py b/examples/easy_start/nipype_preproc_spm_auditory.py
index c8b9211c..0fdab3ab 100644
--- a/examples/easy_start/nipype_preproc_spm_auditory.py
+++ b/examples/easy_start/nipype_preproc_spm_auditory.py
@@ -9,7 +9,6 @@
import nibabel
from pypreprocess.nipype_preproc_spm_utils import do_subjects_preproc
from pypreprocess.datasets import fetch_spm_auditory
-from pypreprocess.reporting.glm_reporter import generate_subject_stats_report
import pandas as pd
from nilearn.glm.first_level.design_matrix import (make_first_level_design_matrix,
check_design_matrix)
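
Note: with generate_subject_stats_report removed, this example builds its design
matrix with nilearn directly. A minimal sketch of the two retained imports in
use (the TR, scan count and events table are illustrative, loosely modeled on
the SPM auditory protocol):

    import numpy as np
    import pandas as pd
    from nilearn.glm.first_level.design_matrix import (
        make_first_level_design_matrix, check_design_matrix)

    # illustrative paradigm: 96 scans, TR = 7s, alternating rest/task blocks
    tr, n_scans = 7., 96
    frame_times = np.arange(n_scans) * tr
    events = pd.DataFrame({"trial_type": ["auditory"] * 8,
                           "onset": 42. + 84. * np.arange(8),
                           "duration": [42.] * 8})

    design_matrix = make_first_level_design_matrix(
        frame_times, events, hrf_model="spm", drift_model="cosine")
    _, matrix, names = check_design_matrix(design_matrix)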
diff --git a/examples/pipelining/nipype_preproc_spm_multimodal_faces.py b/examples/pipelining/nipype_preproc_spm_multimodal_faces.py
index 5c32e421..a764991a 100644
--- a/examples/pipelining/nipype_preproc_spm_multimodal_faces.py
+++ b/examples/pipelining/nipype_preproc_spm_multimodal_faces.py
@@ -20,8 +20,6 @@
# pypreprocess imports
from pypreprocess.datasets import fetch_spm_multimodal_fmri
-from pypreprocess.reporting.base_reporter import ProgressReport
-from pypreprocess.reporting.glm_reporter import generate_subject_stats_report
from pypreprocess.nipype_preproc_spm_utils import do_subject_preproc
from pypreprocess.subject_data import SubjectData
diff --git a/examples/pipelining/nistats_glm_fsl_feeds_fmri.py b/examples/pipelining/nistats_glm_fsl_feeds_fmri.py
index cd1ed509..c588d8b7 100644
--- a/examples/pipelining/nistats_glm_fsl_feeds_fmri.py
+++ b/examples/pipelining/nistats_glm_fsl_feeds_fmri.py
@@ -14,8 +14,6 @@
import time
from pypreprocess.nipype_preproc_spm_utils import (SubjectData,
do_subjects_preproc)
-from pypreprocess.reporting.glm_reporter import generate_subject_stats_report
-from pypreprocess.reporting.base_reporter import ProgressReport
from pypreprocess.datasets import fetch_fsl_feeds
from pypreprocess.io_utils import compute_mean_3D_image
diff --git a/pypreprocess/conf_parser.py b/pypreprocess/conf_parser.py
index 2891433f..4a69617c 100644
--- a/pypreprocess/conf_parser.py
+++ b/pypreprocess/conf_parser.py
@@ -445,9 +445,4 @@ def _ignore_subject(subject_id):
return subjects, preproc_params
# this alias is better
-import_data = _generate_preproc_pipeline
-
-
-if __name__ == '__main__':
- from pypreprocess.reporting.base_reporter import dict_to_html_ul
- print(dict_to_html_ul(_parse_job("job.conf")))
+import_data = _generate_preproc_pipeline
\ No newline at end of file
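
Note: import_data (the surviving alias for _generate_preproc_pipeline) is the
module's entry point; a minimal usage sketch (the .conf filename is
hypothetical):

    from pypreprocess.conf_parser import import_data

    # parse a preprocessing job described in a configuration file;
    # returns the subjects to process and the preprocessing parameters
    subjects, preproc_params = import_data("job.conf")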
diff --git a/pypreprocess/nipype_preproc_spm_utils.py b/pypreprocess/nipype_preproc_spm_utils.py
index 48f58b5f..c251c488 100644
--- a/pypreprocess/nipype_preproc_spm_utils.py
+++ b/pypreprocess/nipype_preproc_spm_utils.py
@@ -34,7 +34,7 @@
_do_subject_smooth as _pp_do_subject_smooth)
from nilearn.plotting.html_document import HTMLDocument
-from .reporting.preproc_reporter import (_set_templates,embed_in_HTML,
+from .reporting.pypreproc_reporter import (_set_templates, embed_in_HTML,
initialize_report, add_component, finalize_report,
generate_realignment_report, generate_registration_report,
generate_corregistration_report, generate_segmentation_report,
diff --git a/pypreprocess/purepython_preproc_utils.py b/pypreprocess/purepython_preproc_utils.py
index b2f83955..271c01aa 100644
--- a/pypreprocess/purepython_preproc_utils.py
+++ b/pypreprocess/purepython_preproc_utils.py
@@ -8,7 +8,7 @@
import os
import inspect
-from .reporting.preproc_reporter import (
+from .reporting.pypreproc_reporter import (
generate_preproc_steps_docstring)
from joblib import Memory
from .io_utils import get_basenames, save_vols, is_niimg, load_vols
diff --git a/pypreprocess/reporting/glm_reporter.py b/pypreprocess/reporting/glm_reporter.py
deleted file mode 100644
index 95940eeb..00000000
--- a/pypreprocess/reporting/glm_reporter.py
+++ /dev/null
@@ -1,485 +0,0 @@
-import os
-import sys
-import numpy as np
-import pylab as pl
-import nibabel
-from nilearn.plotting import plot_stat_map
-from nilearn.image import reorder_img
-from pypreprocess.external.nistats.design_matrix import plot_design_matrix
-from pypreprocess.external.nistats.glm import FirstLevelGLM
-import pandas as pd
-from nilearn.masking import intersect_masks
-from . import base_reporter
-from ..cluster_level_analysis import cluster_stats
-
-
-def generate_level1_stats_table(zmap, mask,
- output_html_path,
- p_threshold=.001,
- z_threshold=None,
- height_control='fpr',
- cluster_th=15,
- null_zmax='bonferroni',
- null_smax=None,
- null_s=None,
- nmaxima=4,
- cluster_pval=.05,
- title=None):
- """Function to generate level 1 stats table for a contrast.
-
- Parameters
- ----------
- zmap: image object
- z-map data image
-
-    mask: image object or string
-        brain mask defining ROI
-
-    output_html_path: string
-        path where the output html should be written
-
- p_threshold: float (optional, default .001)
- (p-variate) frequentist threshold of the activation image
-
- z_threshold: float (optional, default None)
- Threshold that has been applied to Z map (input z_map)
-
- height_control: string
- false positive control meaning of cluster forming
- threshold: 'fpr'|'fdr'|'bonferroni'|'none'
-
- cluster_th: int (optional, default 15)
- cluster size threshold (in # voxels)
-
- null_zmax: string (optional, default 'bonferroni')
- parameter for cluster level statistics (?)
-
-    null_s: string (optional, default None)
- parameter for cluster level statistics (?)
-
-    nmaxima: int (optional, default 4)
-        number of local maxima reported per supra-threshold cluster
-
-    cluster_pval: float (optional, default .05)
-        cluster-level p-value threshold below which clusters are reported
-
-    title: string (optional)
-        title of generated stats table
-
- """
- # sanity
- if isinstance(zmap, str):
- zmap = nibabel.load(zmap)
- if isinstance(mask, str):
- mask = nibabel.load(mask)
-
- # Compute cluster statistics
- nulls = {'zmax': null_zmax, 'smax': null_smax, 's': null_s}
-
- # do some sanity checks
- if title is None:
- title = "Level 1 Statistics"
- clusters, _ = cluster_stats(zmap, mask, p_threshold,
- nulls=nulls, cluster_th=cluster_th,
- height_control=height_control)
- if clusters is not None:
- clusters = [c for c in clusters if c['cluster_p_value'] < cluster_pval]
- else:
- clusters = []
-
- # Make HTML page
- page = open(output_html_path, mode="w")
- page.write("
\n")
- page.write("%s\n" % title)
- page.write("\n")
- page.write(" Voxel significance | \
- Coordinates in MNI referential | \
- Cluster Size |
\n")
- page.write("p FWE corr (Bonferroni) | \
- p FDR corr | Z | p uncorr | ")
- page.write(" x (mm) | y (mm) | z (mm) | \
- (voxels) |
\n")
-
- for cluster in clusters:
- maxima = cluster['maxima']
- size = cluster['size']
- for j in range(min(len(maxima), nmaxima)):
- temp = ["%.3f" % cluster['fwer_p_value'][j]]
- temp.append("%.3f" % cluster['fdr_p_value'][j])
- temp.append("%.2f" % cluster['z_score'][j])
- temp.append("%.3f" % cluster['p_value'][j])
- for it in range(3):
- temp.append("%.0f" % maxima[j][it])
- if j == 0:
- # Main local maximum
- temp.append('%i' % size)
-                page.write('<tr><th align="center">' + '</th>\
-                <th align="center">'.join(temp) + '</th></tr>')
-            else:
-                # Secondary local maxima
-                page.write('<tr><td align="center">' + '</td>\
-                <td align="center">'.join(temp) + '</td><td></td></tr>\n')
-
- nclust = len(clusters)
- nvox = sum([clusters[k]['size'] for k in range(nclust)])
-
- # finish up
- page.write("
\n")
- page.write("Threshold Z: %.2f (%s control at %.3f)
\n"
- % (z_threshold, height_control, p_threshold))
- page.write("Cluster level p-value threshold: %s
\n" % cluster_pval)
- page.write("Cluster size threshold: %i voxels
\n" % cluster_th)
- page.write("Number of voxels: %i
\n" % nvox)
- page.write("Number of clusters: %i
\n" % nclust)
- page.write("\n")
- page.close()
- return clusters
-
-
-def generate_subject_stats_report(
- stats_report_filename,
- contrasts,
- z_maps,
- mask,
- design_matrices=None,
- subject_id=None,
- anat=None,
- display_mode="z",
- cut_coords=None,
- threshold=2.3,
- cluster_th=15,
- start_time=None,
- title=None,
- user_script_name=None,
- progress_logger=None,
- shutdown_all_reloaders=True,
- **glm_kwargs):
- """Generates a report summarizing the statistical methods and results
-
- Parameters
- ----------
-    stats_report_filename: string
-        html file to which output (generated html) will be written
-
-    contrasts: dict of arrays
-        dictionary of contrasts of interest; the keys are the contrast ids,
-        the values are contrast values (lists)
-
-    z_maps: dict of 3D image objects or strings (image filenames)
-        dict with same keys as 'contrasts'; the values are paths of z-maps
-        for the respective contrasts
-
-    mask: nifti image object
-        brain mask for ROI
-
-    design_matrices: list of 'DesignMatrix', `numpy.ndarray` objects or
-    strings (.png, .npz, etc.) for filenames
-        design matrices for the experimental conditions
-
-    anat: 3D image object (optional)
-        brain image serving as background onto which activation maps
-        will be plotted
-
-    anat_affine: 2D array (optional)
-        affine data for the anat
-
-    threshold: float (optional)
-        threshold to be applied to activation maps voxel-wise
-
-    cluster_th: int (optional)
-        minimal voxel count for clusters declared as 'activated'
-
- cmap: cmap object (default viz.cm.cold_hot)
- color-map to use in plotting activation maps
-
- start_time: string (optional)
- start time for the stats analysis (useful for the generated
- report page)
-
- user_script_name: string (optional, default None)
- existing filename, path to user script used in doing the analysis
-
- progress_logger: ProgressLogger object (optional)
- handle for logging progress
-
- shutdown_all_reloaders: bool (optional, default True)
- if True, all pages connected to the stats report page will
- be prevented from reloading after the stats report page
- has been completely generated
-
-    **glm_kwargs:
-        kwargs giving the control parameters used to specify the
-        experimental paradigm and the GLM
-
- """
- # prepare for stats reporting
- if progress_logger is None:
- progress_logger = base_reporter.ProgressReport()
-
- output_dir = os.path.dirname(stats_report_filename)
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
-
- # copy css and js stuff to output dir
- base_reporter.copy_web_conf_files(output_dir)
-
- # initialize gallery of design matrices
- design_thumbs = base_reporter.ResultsGallery(
- loader_filename=os.path.join(output_dir,
- "design.html")
- )
-
- # initialize gallery of activation maps
- activation_thumbs = base_reporter.ResultsGallery(
- loader_filename=os.path.join(output_dir,
- "activation.html")
- )
-
- # get caller module handle from stack-frame
- if user_script_name is None:
- user_script_name = sys.argv[0]
- user_source_code = base_reporter.get_module_source_code(
- user_script_name)
-
- methods = """
- GLM and Statistical Inference have been done using the %s script, \
-powered by nistats.""" % (user_script_name,
- base_reporter.NISTATS_URL)
-
- # report the control parameters used in the paradigm and analysis
- design_params = ""
- glm_kwargs["contrasts"] = contrasts
- if len(glm_kwargs):
- design_params += ("The following control parameters were used for "
- " specifying the experimental paradigm and fitting the "
- "GLM:
")
-
- # reshape glm_kwargs['paradigm']
- if "paradigm" in glm_kwargs:
- paradigm_ = glm_kwargs['paradigm']
- paradigm = {'name' : paradigm_['trial_type'],
- 'onset' : paradigm_['onset']}
- if 'duration' in paradigm_.keys():
- paradigm['duration'] = paradigm_['duration']
- paradigm['n_conditions'] = len(set(paradigm['name']))
- paradigm['n_events'] = len(paradigm['name'])
- paradigm['type'] = 'event'
- if 'duration' in paradigm.keys() and paradigm['duration'][0] > 0:
- paradigm['type'] = 'block'
- glm_kwargs['paradigm'] = paradigm
-
- design_params += base_reporter.dict_to_html_ul(glm_kwargs)
-
- if start_time is None:
- start_time = base_reporter.pretty_time()
-
- if title is None:
- title = "GLM and Statistical Inference"
-    if subject_id is not None:
-        title += " for subject %s" % subject_id
-
- level1_html_markup = base_reporter.get_subject_report_stats_html_template(
- title=title,
- start_time=start_time,
- subject_id=subject_id,
-
- # insert source code stub
- source_script_name=user_script_name,
- source_code=user_source_code,
-
- design_params=design_params,
- methods=methods,
- threshold=threshold)
-
- with open(stats_report_filename, 'w') as fd:
- fd.write(str(level1_html_markup))
-
- progress_logger.log("Level 1 statistics
")
-
- # create design matrix thumbs
-    if design_matrices is not None:
-        if not hasattr(design_matrices, '__len__'):
-            design_matrices = [design_matrices]
-
-        for j, design_matrix in enumerate(design_matrices):
-            # Nistats: design matrices should be strings (pickled DataFrame
-            # filenames) or pandas DataFrames
-            if isinstance(design_matrix, str):
-                # XXX should be a DataFrame pickle here ?
-                design_matrix = pd.read_pickle(design_matrix)
-            elif not isinstance(design_matrix, pd.DataFrame):
-                raise TypeError(
-                    "Unsupported design matrix type: %s" % type(
-                        design_matrix))
-
- # plot design_matrix proper
- ax = plot_design_matrix(design_matrix)
- ax.set_position([.05, .25, .9, .65])
- dmat_outfile = os.path.join(output_dir,
- 'design_matrix_%i.png' % (j + 1))
- pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)
- pl.close()
-
- thumb = base_reporter.Thumbnail()
- thumb.a = base_reporter.a(href=os.path.basename(dmat_outfile))
- thumb.img = base_reporter.img(src=os.path.basename(dmat_outfile),
- height="500px")
- thumb.description = "Design Matrix"
- thumb.description += " %s" % (j + 1) if len(
- design_matrices) > 1 else ""
-
- # commit activation thumbnail into gallery
- design_thumbs.commit_thumbnails(thumb)
-
- # create activation thumbs
- for contrast_id, contrast_val in contrasts.items():
- z_map = z_maps[contrast_id]
-
- # load the map
- if isinstance(z_map, str):
- z_map = nibabel.load(z_map)
-
- # generate level 1 stats table
- title = "Level 1 stats for %s contrast" % contrast_id
- stats_table = os.path.join(output_dir, "%s_stats_table.html" % (
- contrast_id))
- generate_level1_stats_table(
- z_map, mask, stats_table, cluster_th=cluster_th,
- z_threshold=threshold, title=title)
-
- # plot activation proper
-        # XXX: nilearn's plotting bugs out on rotations in the affine, etc.
-        z_map = reorder_img(z_map, resample="continuous")
-        if anat is not None:
-            anat = reorder_img(anat, resample="continuous")
- plot_stat_map(z_map, anat, threshold=threshold,
- display_mode=display_mode, cut_coords=cut_coords,
- black_bg=True)
-
- # store activation plot
- z_map_plot = os.path.join(output_dir,
- "%s_z_map.png" % contrast_id)
- pl.savefig(z_map_plot, dpi=200, bbox_inches='tight', facecolor="k",
- edgecolor="k")
- pl.close()
-
- # create thumbnail for activation
- thumbnail = base_reporter.Thumbnail(
- tooltip="Contrast vector: %s" % contrast_val)
- thumbnail.a = base_reporter.a(href=os.path.basename(stats_table))
- thumbnail.img = base_reporter.img(src=os.path.basename(z_map_plot),
- height="150px",)
- thumbnail.description = contrast_id
- activation_thumbs.commit_thumbnails(thumbnail)
-
- # we're done, shut down re-loaders
-    progress_logger.log('<hr/>')
-
- # prevent stats report page from reloading henceforth
- progress_logger.finish(stats_report_filename)
-
- # prevent any related page from reloading
- if shutdown_all_reloaders:
- progress_logger.finish_dir(output_dir)
-
- # return generated html
- with open(stats_report_filename, 'r') as fd:
- stats_report = fd.read()
-
- return stats_report
-
-
-def group_one_sample_t_test(masks, effects_maps, contrasts, output_dir,
- start_time=base_reporter.pretty_time(),
- **kwargs):
- """
-    Runs a one-sample t-test procedure for group analysis. Here, for each
-    experimental condition, we are only interested in refuting the null
-    hypothesis H0: "The average effect across the subjects is zero."
-
- Parameters
- ----------
- masks: list of strings or nibabel image objects
- subject masks, one per subject
-
- effects_maps: list of dicts of lists
- effects maps from subject-level GLM; each entry is a dictionary;
- each entry (indexed by condition id) of this dictionary is the
-        filename (or corresponding nibabel image object) for the effects
-        map for that condition (aka contrast), for that subject
-
- contrasts: dictionary of array_likes
- contrasts vectors, indexed by condition id
-
-    kwargs: dict-like
-        kwargs for the nilearn plot_stat_map API
- """
-
- # make output directory
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
-
- assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))
-
- # compute group mask
- group_mask = intersect_masks(masks)
-
- # construct design matrix (only one covariate, namely the "mean effect")
-    design_matrix = np.ones(len(effects_maps))[:, np.newaxis]  # intercept only
-
- group_level_z_maps = {}
- group_level_t_maps = {}
- for contrast_id in contrasts:
- print("\tcontrast id: %s" % contrast_id)
-
- # effects maps will be the input to the second level GLM
- first_level_image = nibabel.concat_images(
- [x[contrast_id] for x in effects_maps])
-
- # fit 2nd level GLM for given contrast
- group_model = FirstLevelGLM(first_level_image,
- design_matrix, group_mask)
- group_model.fit(do_scaling=False, model='ols')
-
- # specify and estimate the contrast
-        contrast_val = np.array([[1.]])  # the only possible contrast!
- z_map, t_map = group_model.contrast(
- contrast_val, con_id='one_sample %s' % contrast_id, output_z=True,
- output_stat=True)
-
- # save map
- for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
- map_dir = os.path.join(output_dir, '%s_maps' % map_type)
- if not os.path.exists(map_dir):
- os.makedirs(map_dir)
- map_path = os.path.join(map_dir, 'group_level_%s.nii.gz' % (
- contrast_id))
- print("\t\tWriting %s ..." % map_path)
- nibabel.save(map_img, map_path)
- if map_type == "z":
- group_level_z_maps[contrast_id] = map_path
- elif map_type == "t":
- group_level_z_maps[contrast_id] = map_path
-
- # do stats report
- stats_report_filename = os.path.join(output_dir, "report_stats.html")
- generate_subject_stats_report(stats_report_filename, contrasts,
- group_level_z_maps, group_mask,
- start_time=start_time,
- **kwargs)
-
- print("\r\nStatistic report written to %s\r\n" % stats_report_filename)
-
- return group_level_z_maps
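
Note: the deleted group_one_sample_t_test ran an intercept-only second-level
GLM via the vendored nistats FirstLevelGLM. If equivalent group maps are still
needed, nilearn's public API covers the same one-sample t-test; a minimal
sketch under that assumption (file names are illustrative):

    import numpy as np
    import pandas as pd
    from nilearn.glm.second_level import SecondLevelModel

    # one effects map per subject for a given contrast (paths or images)
    effects_maps = ["sub01_effects.nii.gz", "sub02_effects.nii.gz"]

    # intercept-only design: tests H0 "mean effect across subjects is zero"
    design_matrix = pd.DataFrame({"intercept": np.ones(len(effects_maps))})

    model = SecondLevelModel().fit(effects_maps, design_matrix=design_matrix)
    z_map = model.compute_contrast("intercept", output_type="z_score")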
diff --git a/pypreprocess/reporting/preproc_reporter.py b/pypreprocess/reporting/pypreproc_reporter.py
similarity index 99%
rename from pypreprocess/reporting/preproc_reporter.py
rename to pypreprocess/reporting/pypreproc_reporter.py
index 8040615d..33703336 100644
--- a/pypreprocess/reporting/preproc_reporter.py
+++ b/pypreprocess/reporting/pypreproc_reporter.py
@@ -826,4 +826,10 @@ def lines2breaks(lines, delimiter="\n", number_lines=False):
else:
lines = ["- %s
" % line for line in lines]
output = "" + "".join(lines) + "
"
- return output
\ No newline at end of file
+ return output
+
+def pretty_time():
+ """
+    Returns the current time in the format: hh:mm:ss ddd dd mmm yyyy.
+ """
+ return " ".join([time.ctime().split(" ")[i] for i in [3, 0, 2, 1, 4]])
\ No newline at end of file
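
Note: the new pretty_time permutes the fields of time.ctime() (and assumes the
module already imports time). time.ctime() pads single-digit days with a double
space, so split(" ") yields an empty field and shifts the indices; a sketch of
a strftime-based equivalent that avoids this:

    import time

    def pretty_time():
        """Return the current time as hh:mm:ss ddd dd mmm yyyy."""
        # equivalent to reordering time.ctime(), but immune to the
        # double-space padding ctime() uses for single-digit days
        return time.strftime("%H:%M:%S %a %d %b %Y")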
diff --git a/pypreprocess/spm_loader/utils.py b/pypreprocess/spm_loader/utils.py
index 9a95424f..29982d71 100644
--- a/pypreprocess/spm_loader/utils.py
+++ b/pypreprocess/spm_loader/utils.py
@@ -16,7 +16,6 @@
PYPREPROCESS_DIR = os.path.dirname(
os.path.dirname(os.path.split(os.path.abspath(__file__))[0]))
sys.path.append(PYPREPROCESS_DIR)
-from ..reporting import glm_reporter
from ..nipype_preproc_spm_utils import do_subjects_preproc, SubjectData
@@ -277,29 +276,6 @@ def execute_glm(doc, out_dir, contrast_definitions=None,
_contrasts[contrast_name] = contrast
z_maps[contrast_name] = map_path
- # invoke a single API to handle plotting and html business for you
- subject_stats_report_filename = os.path.join(
- subject_output_dir, "report_stats.html")
- glm_reporter.generate_subject_stats_report(
- subject_stats_report_filename,
- _contrasts,
- z_maps,
- doc['mask'],
- design_matrices=list(params['design_matrices']),
- subject_id=doc['subject'],
- cluster_th=15, # 15 voxels
- start_time=stats_start_time,
- TR=doc['TR'],
- n_scans=doc['n_scans'],
- n_sessions=doc['n_sessions'],
- model=glm_model,
- )
-
- print("Report for subject %s written to %s" % (
- doc['subject'],
- subject_stats_report_filename))
-
-
def inv_perm(perm):
inverse = [0] * len(perm)
for i, p in enumerate(perm):
diff --git a/pypreprocess/tests/test_bug_fixes.py b/pypreprocess/tests/test_bug_fixes.py
deleted file mode 100644
index c4fddc13..00000000
--- a/pypreprocess/tests/test_bug_fixes.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from pypreprocess.tests._test_utils import _make_sd
-from pypreprocess.reporting.preproc_reporter import generate_stc_thumbnails
-
-
-def test_bug_49_index_error_fix():
- sd = _make_sd(ext=".nii.gz", n_sessions=2,
- unique_func_names=True)
- sd.sanitize()
- sd.init_report()
- generate_stc_thumbnails(sd.func, sd.func,
- sd.reports_output_dir)
diff --git a/scripts/hcp_preproc_and_analysis.py b/scripts/hcp_preproc_and_analysis.py
index 339e5090..6cff5c94 100644
--- a/scripts/hcp_preproc_and_analysis.py
+++ b/scripts/hcp_preproc_and_analysis.py
@@ -21,11 +21,7 @@
from pypreprocess.io_utils import load_specific_vol
from pypreprocess.fsl_to_nipy import (read_fsl_design_file,
make_dmtx_from_timing_files)
-from pypreprocess.reporting.glm_reporter import (generate_subject_stats_report,
- group_one_sample_t_test)
-from pypreprocess.reporting.base_reporter import (ProgressReport,
- pretty_time
- )
+from pypreprocess.reporting.pypreproc_reporter import pretty_time
from pypreprocess.conf_parser import _generate_preproc_pipeline
from joblib import Parallel, delayed, Memory
from nipype.caching import Memory as NipypeMemory
@@ -533,20 +529,3 @@ def tortoise(*args):
print("... done.\r\n")
print("Group GLM")
- contrasts = subjects[0].contrasts
- subjects_effects_maps = [subject_data.effects_maps
- for subject_data in subjects]
-
- group_one_sample_t_test(
- mask_images,
- subjects_effects_maps,
- contrasts,
- task_output_dir,
- threshold=threshold,
- cluster_th=cluster_th,
- start_time=stats_start_time,
- subjects=[subject_data.subject_id for subject_data in subjects],
- title='Group GLM for HCP fMRI %s protocol (%i subjects)' % (
- protocol, len(subjects)),
- slicer=slicer
- )
diff --git a/scripts/openfmri.py b/scripts/openfmri.py
deleted file mode 100644
index a6abc085..00000000
--- a/scripts/openfmri.py
+++ /dev/null
@@ -1,177 +0,0 @@
-import os
-import glob
-import numpy as np
-import pandas as pd
-import nibabel
-from joblib import Parallel, delayed, Memory
-from nistats.design_matrix import (make_design_matrix, check_design_matrix)
-from pypreprocess.external.nistats.glm import FMRILinearModel
-from pypreprocess.reporting.base_reporter import ProgressReport
-from pypreprocess.reporting.glm_reporter import (generate_subject_stats_report,
- group_one_sample_t_test)
-from nilearn.plotting import plot_prob_atlas, show
-import matplotlib.pyplot as plt
-
-n_jobs = int(os.environ.get("N_JOBS", 1))
-
-
-def _load_contrast_names(cfile):
- contrast_names = []
- with open(cfile, 'r') as fd:
- cons = [x.rstrip("\r\n") for x in fd.readlines()]
- cons = [x for x in cons if x]
- for c in cons:
- c = c.split(" ")
- contrast_names.append(c[1])
- return contrast_names
-
-
-def _load_condition_keys(cfile):
- conditions = {}
- with open(cfile, 'r') as fd:
- conds = [x.rstrip("\r\n") for x in fd.readlines()]
- conds = [x for x in conds if x]
- for c in conds:
- c = c.split(" ")
- conditions[c[1]] = c[2]
- return conditions
-
-
-# meta data
-tr = 2.
-drift_model = 'Cosine'
-hrf_model = 'spm + derivative'
-hfcut = 128.
-
-data_dir = os.environ.get("DATA_DIR", "/home/elvis/nilearn_data/ds001")
-output_dir = os.environ.get("OUTPUT_DIR",
- os.path.join(data_dir,
- "pypreprocess_output/ds001"))
-condition_keys = _load_condition_keys(
- os.path.join(data_dir, "models/model001/condition_key.txt"))
-contrast_names = _load_contrast_names(
- os.path.join(data_dir, "models/model001/task_contrasts.txt"))
-subject_ids = list(map(os.path.basename,
-                       sorted(glob.glob("%s/sub*" % output_dir))))[:8]
-
-
-def do_subject_glm(subject_id):
- subject_output_dir = os.path.join(output_dir, subject_id)
-
- # make design matrices
- design_matrices = []
- func = []
- anat = os.path.join(subject_output_dir, "anatomy", "whighres001_brain.nii")
- for run_path in sorted(glob.glob(os.path.join(
- data_dir, subject_id, "model/model001/onsets/task*"))):
- run_id = os.path.basename(run_path)
- run_func = glob.glob(os.path.join(subject_output_dir, "BOLD", run_id,
- "wrbold*.nii"))
- assert len(run_func) == 1
- run_func = run_func[0]
- run_onset_paths = sorted(glob.glob(os.path.join(
- data_dir, subject_id, "model/model001/onsets/%s/*" % run_id)))
-        onsets = list(map(np.loadtxt, run_onset_paths))
-        conditions = np.hstack(
-            [[condition_keys["cond%03i" % (c + 1)]] * len(onsets[c])
-             for c in range(len(run_onset_paths))])
-        onsets = np.vstack(onsets)
-        onsets *= tr
- run_func = nibabel.load(run_func)
- func.append(run_func)
- n_scans = run_func.shape[-1]
- onset, duration, modulation = onsets.T
-
- frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
- paradigm = pd.DataFrame(dict(name=conditions, onset=onset,
- duration=duration, modulation=modulation))
- design_matrix = make_design_matrix(frametimes, paradigm,
- hrf_model=hrf_model,
- drift_model=drift_model,
- period_cut=hfcut)
- design_matrices.append(design_matrix)
- n_runs = len(func)
-
- # specify contrasts
- _, _, names = check_design_matrix(design_matrix)
- n_columns = len(names)
- contrast_matrix = np.eye(n_columns)
- contrasts = {}
- for c in range(len(condition_keys)):
- contrasts[names[2 * c]] = contrast_matrix[2 * c]
- contrasts["avg"] = np.mean(contrasts.values(), axis=0)
-
- # more interesting contrasts
- contrasts_ = {}
- for contrast, val in contrasts.items():
- if not contrast == "avg":
- contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
- contrasts = contrasts_
-
- # fit GLM
- from nilearn.image import smooth_img
- func = smooth_img(func, fwhm=8.)
- print('Fitting a GLM (this takes time)...')
- fmri_glm = FMRILinearModel(func, [check_design_matrix(design_matrix)[1]
- for design_matrix in design_matrices],
- mask='compute')
- fmri_glm.fit(do_scaling=True, model='ar1')
-
- # save computed mask
- mask_path = os.path.join(subject_output_dir, "mask.nii")
- print("Saving mask image to %s ..." % mask_path)
- nibabel.save(fmri_glm.mask, mask_path)
-
- # compute contrast maps
- z_maps = {}
- effects_maps = {}
- for contrast_id, contrast_val in contrasts.items():
- print("\tcontrast id: %s" % contrast_id)
- z_map, t_map, effects_map, var_map = fmri_glm.contrast(
- [contrast_val] * n_runs, con_id=contrast_id, output_z=True,
- output_stat=True, output_effects=True, output_variance=True)
- for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
- [z_map, t_map, effects_map, var_map]):
- map_dir = os.path.join(subject_output_dir, '%s_maps' % map_type)
- if not os.path.exists(map_dir):
- os.makedirs(map_dir)
- map_path = os.path.join(
- map_dir, '%s.nii.gz' % contrast_id)
- print("\t\tWriting %s ..." % map_path)
- nibabel.save(out_map, map_path)
- if map_type == 'z':
- z_maps[contrast_id] = map_path
- if map_type == 'effects':
- effects_maps[contrast_id] = map_path
-
- # # generate stats report
- # stats_report_filename = os.path.join(subject_output_dir, "reports",
- # "report_stats.html")
- # generate_subject_stats_report(
- # stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
- # threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
- # subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
- # paradigm=paradigm, frametimes=frametimes,
- # drift_model=drift_model, hrf_model=hrf_model)
- # ProgressReport().finish_dir(subject_output_dir)
-
- return dict(subject_id=subject_id, mask=mask_path,
- effects_maps=effects_maps, z_maps=z_maps, contrasts=contrasts)
-
-
-# first level GLM
-mem = Memory(os.path.join(output_dir, "cache_dir"))
-n_jobs = min(n_jobs, len(subject_ids))
-first_levels = Parallel(n_jobs=n_jobs)(delayed(mem.cache(do_subject_glm))(
- subject_id) for subject_id in subject_ids)
-
-# run second-level GLM
-group_zmaps = group_one_sample_t_test(
- [subject_data["mask"] for subject_data in first_levels],
- [subject_data["effects_maps"] for subject_data in first_levels],
- first_levels[0]["contrasts"],
- output_dir, threshold=2.)
-plot_prob_atlas([zmap for zmap in group_zmaps.values() if "_minus_" in zmap],
- threshold=1.2, view_type="filled_contours")
-plt.savefig("group_zmaps.png")
-show()
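
Note: the deleted openfmri.py pipeline fit its subject-level GLMs with the
vendored nistats FMRILinearModel. The same step maps onto nilearn's public
first-level API; a minimal sketch, assuming nilearn >= 0.7 (func_imgs and
design_matrices stand for the per-run lists the old script built):

    from nilearn.glm.first_level import FirstLevelModel

    # noise_model='ar1' matches the old fit(model='ar1') call;
    # high_pass=1/128 Hz matches hfcut=128s
    model = FirstLevelModel(t_r=2., drift_model="cosine", high_pass=1. / 128,
                            hrf_model="spm + derivative", smoothing_fwhm=8.,
                            noise_model="ar1")
    model.fit(func_imgs, design_matrices=design_matrices)

    # one compute_contrast call per output type, per contrast
    z_map = model.compute_contrast(contrast_val, output_type="z_score")
    effects_map = model.compute_contrast(contrast_val,
                                         output_type="effect_size")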