
Commit

Merge pull request #75 from Islast/dev
Remove names_308_style from BrainNetworksInPython
merging ✨
Islast committed Aug 14, 2018
2 parents 2357a84 + 1ef50c8 commit e713121
Showing 9 changed files with 965 additions and 2,039 deletions.
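The change is mechanical but touches every public entry point: the names_308_style flag (which told the loaders to drop the first 41 subcortical names) is removed, and the bundled 500.names.txt now ships without those 41 entries. A hedged before/after sketch of the central read_in_data call (file paths hypothetical; signatures as in the diffs below):

    # Before: the flag controlled stripping of the 41 subcortical names,
    # and was echoed back in the return tuple.
    df, names, covars_list, centroids, names_308_style = read_in_data(
        'regional_measures.csv', 'names.txt', names_308_style=True)

    # After: the names file is expected to contain cortical names only.
    df, names, covars_list, centroids = read_in_data(
        'regional_measures.csv', 'names.txt')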
@@ -1,44 +1,3 @@
-Left-Cerebral-White-Matter
-Left-Lateral-Ventricle
-Left-Inf-Lat-Vent
-Left-Cerebellum-White-Matter
-Left-Cerebellum-Cortex
-Left-Thalamus-Proper
-Left-Caudate
-Left-Putamen
-Left-Pallidum
-3rd-Ventricle
-4th-Ventricle
-Brain-Stem
-Left-Hippocampus
-Left-Amygdala
-CSF
-Left-Accumbens-area
-Left-VentralDC
-Left-vessel
-Left-choroid-plexus
-Right-Cerebral-White-Matter
-Right-Lateral-Ventricle
-Right-Inf-Lat-Vent
-Right-Cerebellum-White-Matter
-Right-Cerebellum-Cortex
-Right-Thalamus-Proper
-Right-Caudate
-Right-Putamen
-Right-Pallidum
-Right-Hippocampus
-Right-Amygdala
-Right-Accumbens-area
-Right-VentralDC
-Right-vessel
-Right-choroid-plexus
-WM-hypointensities
-Optic-Chiasm
-CC_Posterior
-CC_Mid_Posterior
-CC_Central
-CC_Mid_Anterior
-CC_Anterior
 lh_bankssts_part1
 lh_bankssts_part2
 lh_caudalanteriorcingulate_part1

Large diffs are not rendered by default.

@@ -19,15 +19,13 @@
 names_file = filepath + "/500.names.txt"
 regionalmeasures_file = filepath + "/PARC_500aparc_thickness_behavmerge.csv"
 covars_file = None
-names_308_style = True


 def _data():
     return (centroids_file,
             regionalmeasures_file,
             names_file,
-            covars_file,
-            names_308_style)
+            covars_file)


 def _centroids():
@@ -46,15 +44,10 @@ def _covars():
     return covars_file


-def _is_names_308_style():
-    return names_308_style
-
-
 def import_data():
     return read_in_data(
         regionalmeasures_file,
         names_file,
         covars_file=covars_file,
         centroids_file=centroids_file,
-        names_308_style=names_308_style,
         data_as_df=True)
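A quick sketch of consuming the bundled dataset after this change (module path as used in tests/write_fixtures.py; return orders as defined above):

    import BrainNetworksInPython.datasets.NSPN_WhitakerVertes_PNAS2016.data as data

    # _data() now returns a 4-tuple with no names_308_style element
    centroids_file, regionalmeasures_file, names_file, covars_file = data._data()

    # import_data() forwards to read_in_data, which now returns 4 values
    df, names, covars_list, centroids = data.import_data()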
6 changes: 1 addition & 5 deletions BrainNetworksInPython/make_graphs.py
@@ -18,7 +18,7 @@ def anatomical_graph_attributes():
     return ['parcellation', 'centroids']


-def assign_node_names(G, parcellation, names_308_style=False):
+def assign_node_names(G, parcellation):
     """
     Returns the network G with node attributes "name" assigned
     according to the list parcellation.
@@ -36,10 +36,6 @@ def assign_node_names(G, parcellation, names_308_style=False):
     # Assign anatomical names to the nodes
     for i, node in enumerate(G.nodes()):
         G.node[i]['name'] = parcellation[i]
-        if names_308_style:
-            G.node[i]['name_34'] = parcellation[i].split('_')[1]
-            G.node[i]['name_68'] = parcellation[i].rsplit('_', 1)[0]
-            G.node[i]['hemi'] = parcellation[i].split('_', 1)[0]
     #
     G.graph['parcellation'] = True
     return G
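A minimal usage sketch of the trimmed signature (toy graph and names invented for illustration; the G.node[i] indexing matches the networkx 1.x style used in the function body):

    import networkx as nx
    from BrainNetworksInPython.make_graphs import assign_node_names

    G = nx.complete_graph(2)
    G = assign_node_names(G, ['lh_bankssts_part1', 'lh_bankssts_part2'])
    print(G.node[0]['name'])  # -> 'lh_bankssts_part1'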
19 changes: 1 addition & 18 deletions BrainNetworksInPython/scripts/useful_functions.py
@@ -10,7 +10,6 @@ def read_in_data(
         names_file,
         covars_file=None,
         centroids_file=None,
-        names_308_style=False,
         data_as_df=True):
     '''
     Read in the data from the three input files:
@@ -23,16 +22,10 @@
       brain regions. Should be aligned with names_file such that the ith
       line of centroids_file is the coordinates of the brain region named
       in the ith line of names_file.
-    * names_308_style : If the names are in 308 style then drop the first
-      41 entries from the names file.
     '''
     # Load names
     with open(names_file) as f:
         names = [line.strip() for line in f]
-    # If you have your names in names_308_style you need to strip the
-    # first 41 items
-    if names_308_style:
-        names = names[41:]

     # Load covariates
     if covars_file is not None:
@@ -43,24 +36,14 @@

     if centroids_file is not None:
         centroids = np.loadtxt(centroids_file)
-        # If you have your names in names_308_style you need to strip the
-        # first 41 items
-        if names_308_style:
-            names = names[41:]
-            centroids = centroids[41:, :]

     # Load data
     if data_as_df:
         df = pd.read_csv(data)
-        # You may also have to strip the words "thickness" from the
-        # end of the names in the data frame
-        if names_308_style:
-            df.columns = [col.rsplit('_thickness', 1)[0] for col in df.columns]
     else:
         df = np.loadtxt(data)

-    return df, names, covars_list, centroids, names_308_style
-
+    return df, names, covars_list, centroids

 def write_out_measures(df, output_dir, name, first_columns=[]):
     '''
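For reference, a sketch of the new call into read_in_data (the first two file names come from the bundled dataset above; the centroids path is hypothetical):

    from BrainNetworksInPython.scripts.useful_functions import read_in_data

    df, names, covars_list, centroids = read_in_data(
        'PARC_500aparc_thickness_behavmerge.csv',  # regional measures
        '500.names.txt',                           # one region name per line
        covars_file=None,
        centroids_file='500.centroids.txt',        # hypothetical path
        data_as_df=True)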
17 changes: 1 addition & 16 deletions BrainNetworksInPython/wrappers/corrmat_from_regionalmeasures.py
@@ -72,18 +72,6 @@ def setup_argparser():
             ('  Default: None')),
         default=None)

-    parser.add_argument(
-        '--names_308_style',
-        action='store_true',
-        help=textwrap.dedent(
-            ('Include this flag if your names are in the NSPN 308\n') +
-            ('parcellation style (which means you have 41 subcortical \
-regions)\n') +
-            ('that are still in the names files and that\n') +
-            ('the names are in <hemi>_<DK-region>_<part> format.\n') +
-            ('  Default: False')),
-        default=False)
-
     parser.add_argument(
         '--method',
         type=str,
@@ -102,7 +90,6 @@ def corrmat_from_regionalmeasures(regional_measures_file,
                                   names_file,
                                   output_name,
                                   covars_file=None,
-                                  names_308_style=False,
                                   method='pearson'):
     '''
     Read in regional measures, names and covariates files to compute
@@ -123,8 +110,7 @@
     df, names, covars_list, *a = read_in_data(
         regional_measures_file,
         names_file,
-        covars_file=covars_file,
-        names_308_style=names_308_style)
+        covars_file=covars_file)

     M = mcm.corrmat_from_regionalmeasures(
         df, names, covars=covars_list, method=method)
@@ -144,7 +130,6 @@ def corrmat_from_regionalmeasures(regional_measures_file,
         arg.names_file,
         arg.output_name,
         covars_file=arg.covars_file,
-        names_308_style=arg.names_308_style,
         method=arg.method)

 # ============================================================================
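A sketch of calling the wrapper with the trimmed signature (input file names from the bundled dataset; output name as in the test fixtures):

    from BrainNetworksInPython.wrappers.corrmat_from_regionalmeasures import corrmat_from_regionalmeasures

    corrmat_from_regionalmeasures(
        'PARC_500aparc_thickness_behavmerge.csv',  # regional_measures_file
        '500.names.txt',                           # names_file
        'corrmat_file.txt',                        # output_name
        covars_file=None,
        method='pearson')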
21 changes: 3 additions & 18 deletions BrainNetworksInPython/wrappers/network_analysis_from_corrmat.py
@@ -86,17 +86,6 @@ def setup_argparser():
             with real network.\n') + ('  Default: 1000')),
         default=1000)

-    parser.add_argument(
-        '--names_308_style',
-        action='store_true',
-        help=textwrap.dedent(
-            ('Include this flag if your names are in the NSPN 308\n')
-            + ('parcellation style (which means you have 41 subcortical \
-regions)\n')
-            + ('that are still in the names and centroids files and that\n')
-            + ('the names are in <hemi>_<DK-region>_<part> format.\n')
-            + ('  Default: False')),
-        default=False)

     arguments = parser.parse_args()

@@ -108,8 +97,7 @@ def network_analysis_from_corrmat(corr_mat_file,
                                   centroids_file,
                                   output_dir,
                                   cost=10,
-                                  n_rand=1000,
-                                  names_308_style=False):
+                                  n_rand=1000):
     '''
     This is the big function!
     It reads in the correlation matrix, thresholds it at the given cost
@@ -122,7 +110,6 @@
         corr_mat_file,
         names_file,
         centroids_file=centroids_file,
-        names_308_style=names_308_style,
         data_as_df=False)

     corrmat = os.path.basename(corr_mat_file).strip('.txt')
@@ -131,8 +118,7 @@
     B = bnip.BrainNetwork(
         network=M,
         parcellation=names,
-        centroids=centroids,
-        names_308_style=names_308_style)
+        centroids=centroids)
     # Threshold graph
     G = B.threshold(cost)
     # Calculate the modules
@@ -181,8 +167,7 @@ def network_analysis_from_corrmat(corr_mat_file,
         arg.centroids_file,
         arg.output_dir,
         cost=arg.cost,
-        n_rand=arg.n_rand,
-        names_308_style=arg.names_308_style)
+        n_rand=arg.n_rand)

 # =============================================================================
 # Wooo! All done :)
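And the matching sketch for this wrapper (centroids path hypothetical; n_rand is kept small here purely for illustration, as in the test fixtures below):

    from BrainNetworksInPython.wrappers.network_analysis_from_corrmat import network_analysis_from_corrmat

    network_analysis_from_corrmat(
        'corrmat_file.txt',   # corr_mat_file
        '500.names.txt',      # names_file
        '500.centroids.txt',  # centroids_file (hypothetical)
        'network-analysis',   # output_dir
        cost=10,
        n_rand=10)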
62 changes: 30 additions & 32 deletions tests/write_fixtures.py
@@ -1,42 +1,41 @@
 #--------------------------- Write fixtures ---------------------------
-# To regression test our wrappers we need examples. This script 
+# To regression test our wrappers we need examples. This script
 # generates files. We save these files once, and regression_test.py
 # re-generates these files to tests them for identicality with the
-# presaved examples (fixtures). If they are found not to be identical 
-# it throws up an error. 
+# presaved examples (fixtures). If they are found not to be identical
+# it throws up an error.
 #
-# The point of this is to check that throughout the changes we make to 
+# The point of this is to check that throughout the changes we make to
 # BrainNetworksInPython the functionality of this script stays the same
 #
-# Currently the functionality of write_fixtures is to generate corrmat 
-# and network_analysis data via the functions 
+# Currently the functionality of write_fixtures is to generate corrmat
+# and network_analysis data via the functions
 # corrmat_from_regionalmeasures and network_analysis_from_corrmat.
 #----------------------------------------------------------------------
 import os
 import sys
 import networkx as nx

 def recreate_correlation_matrix_fixture(folder):
     ##### generate a correlation matrix in the given folder using #####
-    ##### the Whitaker_Vertes dataset #####
+    ##### the Whitaker_Vertes dataset #####
     import BrainNetworksInPython.datasets.NSPN_WhitakerVertes_PNAS2016.data as data
-    centroids, regionalmeasures, names, covars, names_308_style = data._get_data()
+    centroids, regionalmeasures, names, covars= data._get_data()
     from BrainNetworksInPython.wrappers.corrmat_from_regionalmeasures import corrmat_from_regionalmeasures
     corrmat_path = os.getcwd()+folder+'/corrmat_file.txt'
     corrmat_from_regionalmeasures(
         regionalmeasures,
-        names, 
-        corrmat_path,
-        names_308_style=names_308_style)
-
+        names,
+        corrmat_path)
+
 def recreate_network_analysis_fixture(folder, corrmat_path):
     ##### generate network analysis in the given folder using the #####
     ##### data in example_data and the correlation matrix given   #####
-    ##### by corrmat_path #####
+    ##### by corrmat_path #####
     import BrainNetworksInPython.datasets.NSPN_WhitakerVertes_PNAS2016.data as data
-    centroids, regionalmeasures, names, covars, names_308_style = data._get_data()
-    # It is necessary to specify a random seed because 
-    # network_analysis_from_corrmat generates random graphs to 
+    centroids, regionalmeasures, names, covars= data._get_data()
+    # It is necessary to specify a random seed because
+    # network_analysis_from_corrmat generates random graphs to
     # calculate global measures
     import random
     random.seed(2984)
@@ -46,48 +45,48 @@ def recreate_network_analysis_fixture(folder, corrmat_path):
         centroids,
         os.getcwd()+folder+'/network-analysis',
         cost=10,
-        n_rand=10, # this is not a reasonable
+        n_rand=10 # this is not a reasonable
         # value for n, we generate only 10 random
         # graphs to save time
-        names_308_style=names_308_style)
-def write_fixtures(folder='/temporary_test_fixtures'):
+        )
+
+def write_fixtures(folder='/temporary_test_fixtures'):
     ## Run functions corrmat_from_regionalmeasures and ##
     ## network_analysis_from_corrmat to save corrmat in given folder ##
     ##---------------------------------------------------------------##
     # if the folder does not exist, create it
     if not os.path.isdir(os.getcwd()+folder):
         os.makedirs(os.getcwd()+folder)
     # generate and save the correlation matrix
-    print("generating new correlation matrix") 
+    print("generating new correlation matrix")
     recreate_correlation_matrix_fixture(folder)
     # generate and save the network analysis
-    print("generating new network analysis") 
+    print("generating new network analysis")
     corrmat_path = 'temporary_test_fixtures/corrmat_file.txt'
     recreate_network_analysis_fixture(folder, corrmat_path)

 def delete_fixtures(folder):
     import shutil
     print('\ndeleting temporary files')
     shutil.rmtree(os.getcwd()+folder)

 def hash_folder(folder='temporary_test_fixtures'):
     hashes = {}
     for path, directories, files in os.walk(folder):
         for file in sorted(files):
             hashes[os.path.join(path, file)] = hash_file(os.path.join(path, file))
         for dir in sorted(directories):
             hashes.update(hash_folder(os.path.join(path, dir)))
-        break 
+        break
     return hashes


 def hash_file(filename):
     import hashlib
     m = hashlib.sha256()
     with open(filename, 'rb') as f:
         while True:
-            b = f.read(2**10) 
+            b = f.read(2**10)
             if not b: break
             m.update(b)
     return m.hexdigest()
@@ -103,16 +102,16 @@ def generate_fixture_hashes(folder='temporary_test_fixtures'):
     return hash_dict

 def current_fixture_name():
-    # returns the fixture name appropriate the current versions 
+    # returns the fixture name appropriate the current versions
     # of python and networkx
     return "tests/.fixture_hash"+str(sys.version_info[:2])+'networkx_version'+str(nx.__version__)

 def pickle_hash(hash_dict):
     import pickle
     # when we save we record the python and networkx versions
     with open(current_fixture_name(), 'wb') as f:
         pickle.dump(hash_dict, f)

 def unpickle_hash():
     import pickle
     # import fixture relevant to the current python, networkx versions
@@ -125,4 +124,3 @@ def unpickle_hash():
 if input("Are you sure you want to update Brain Networks In Python's test fixtures? (y/n)") == 'y':
     hash_dict = generate_fixture_hashes()
     pickle_hash(hash_dict)
-
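Taken together, the fixture helpers support a regression round trip along these lines (a sketch only; it assumes unpickle_hash returns the previously pickled hash dict, as its truncated body suggests):

    # Regenerate fixtures, hash them, and compare with the stored hashes
    # for the current python/networkx combination.
    write_fixtures(folder='/temporary_test_fixtures')
    new_hashes = generate_fixture_hashes('temporary_test_fixtures')
    old_hashes = unpickle_hash()
    assert new_hashes == old_hashes  # regression check
    delete_fixtures('/temporary_test_fixtures')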
