
Commit

Pull 24.1 release to main (#789)
* Turn off column statistics option when running mysqldump (#771) (see the sketch after this list)

* turn off column statistics option when running mysqldump

* add new option to the script to provide additional mysqldump options

* run mass perldoc

* fix perldoc

* fix perldoc

* check if date exists otherwise set it to null when inserting into the files table (#777)

* [dcm2bids] Get scanner candidate's RegistrationProjectID based on the scanned candidate's visit ProjectID (#779)

* add registrationProjectID to the function creating the candidate scanner

* missed function call in base_pipeline

* flake8

* Pull 24.0-release to 24.1-release (#788)

* Reload the mri_upload dictionary before checking if a tarchive has been validated (#783)

* reload mri_upload object

* remove debugging exit and print statements

* fix minor bugs when dealing with scans.tsv files (#774)

* fix regex search for excluded series description patterns (#786)

* fix_return_statement_of_create_imaging_upload_dict_from_upload_id_function (#787)
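For context on the mysqldump change (#771): mysqldump 8.0 clients collect column statistics by default, which fails against older MySQL and MariaDB servers, so dumps need to be run with --column-statistics=0. The sketch below is illustrative only; the repository's actual script and the name of its new pass-through option are not shown in this diff.

    # Illustrative sketch, not the repository's script: disable column
    # statistics and pass extra user-supplied mysqldump options through.
    import subprocess

    def dump_database(db_name, extra_mysqldump_options=None):
        cmd = ["mysqldump", "--column-statistics=0", db_name]
        cmd += extra_mysqldump_options or []   # e.g. ["--no-tablespaces"]
        subprocess.run(cmd, check=True)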
cmadjar authored Aug 16, 2022
1 parent ff24136 commit e7d09c3
Showing 5 changed files with 22 additions and 10 deletions.
2 changes: 2 additions & 0 deletions python/lib/dcm2bids_imaging_pipeline_lib/base_pipeline.py
@@ -433,6 +433,8 @@ def check_if_tarchive_validated_in_db(self):
         If the DICOM archive was not validated, the pipeline will exit and log the proper error information.
         """
+        # reload the mri_upload object with updated database values
+        self.load_imaging_upload_and_tarchive_dictionaries()
         mu_dict = self.imaging_upload_obj.imaging_upload_dict
         if ("IsTarchiveValidated" not in mu_dict.keys() or not mu_dict["IsTarchiveValidated"]) and not self.force:
             err_msg = f"The DICOM archive validation has failed for UploadID {self.upload_id}. Either run the" \
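The two added lines above fix a stale-read bug: the mri_upload dictionary was loaded when the pipeline started, before the validation step updated the database, so IsTarchiveValidated was checked against an outdated snapshot. A minimal standalone sketch of the pattern, with hypothetical names:

    # Hypothetical sketch: re-fetch database state before acting on it.
    def fetch_upload_row(upload_id):
        """Stand-in for a fresh read of the mri_upload row."""
        return {"UploadID": upload_id, "IsTarchiveValidated": True}

    class Pipeline:
        def __init__(self, upload_id):
            self.upload_id = upload_id
            # snapshot taken at start-up, before validation has run
            self.mu_dict = {"UploadID": upload_id, "IsTarchiveValidated": False}

        def check_if_tarchive_validated(self):
            # without this reload, the stale start-up snapshot is checked
            self.mu_dict = fetch_upload_row(self.upload_id)
            return bool(self.mu_dict.get("IsTarchiveValidated"))

    print(Pipeline(42).check_if_tarchive_validated())   # True once reloaded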
@@ -234,12 +234,12 @@ def _is_series_description_to_be_excluded(self, json_file_path):
         series_desc = json_data_dict["SeriesDescription"]

         if type(self.excluded_series_desc_regex_list) is str:
-            pattern = re.compile(self.excluded_series_desc_regex_list)
-            return True if pattern.match(series_desc) else False
+            pattern = re.compile(self.excluded_series_desc_regex_list, re.IGNORECASE)
+            return True if re.search(pattern, series_desc) else False
         else:
             for regex in self.excluded_series_desc_regex_list:
-                pattern = re.compile(regex)
-                if pattern.match(series_desc):
+                pattern = re.compile(regex, re.IGNORECASE)
+                if re.search(pattern, series_desc):
                     return True

     def _loop_through_nifti_files_and_insert(self):
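The heart of this fix (#786) is the switch from re.match, which only matches at the start of the string, to re.search, which scans the whole string, plus case-insensitive compilation. A standalone illustration of the difference:

    import re

    series_desc = "Localizer ADNI MPRAGE repeat"
    pattern = re.compile("mprage", re.IGNORECASE)

    print(bool(pattern.match(series_desc)))    # False: match() anchors at position 0
    print(bool(pattern.search(series_desc)))   # True: search() scans the whole string

Note that re.search(pattern, series_desc) with an already compiled pattern, as the new code does, is equivalent to pattern.search(series_desc).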
2 changes: 1 addition & 1 deletion python/lib/imaging_upload.py
@@ -50,7 +50,7 @@ def create_imaging_upload_dict_from_upload_id(self, upload_id):
         """

         results = self.mri_upload_db_obj.create_mri_upload_dict('UploadID', upload_id)
-        self.imaging_upload_dict = results[0]
+        self.imaging_upload_dict = results[0] if results else None

     def create_imaging_upload_dict_from_tarchive_id(self, tarchive_id):
         """
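This one-line change (#787 in the commit list above) guards against an empty result set: indexing results[0] on an empty list raises IndexError, while the conditional expression falls back to None, which callers must now be prepared to handle. A standalone illustration:

    def first_or_none(results):
        # mirrors the fixed line: results[0] if results else None
        return results[0] if results else None

    print(first_or_none([{"UploadID": 42}]))   # {'UploadID': 42}
    print(first_or_none([]))                   # None instead of IndexError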
17 changes: 13 additions & 4 deletions python/lib/scanstsv.py
@@ -83,6 +83,10 @@ def get_acquisition_time(self):
             exit()
         else:
             eeg_acq_time = self.acquisition_data['acq_time']
+
+        if eeg_acq_time == 'n/a':
+            return None
+
         try:
             eeg_acq_time = parse(eeg_acq_time)
         except ValueError as e:
@@ -114,11 +118,16 @@ def get_age_at_scan(self):

     def copy_scans_tsv_file_to_loris_bids_dir(self, bids_sub_id, loris_bids_root_dir, data_dir):

-        file = self.scans_tsv_file
-        copy = loris_bids_root_dir + '/sub-' + bids_sub_id + '/' + os.path.basename(self.scans_tsv_file)
-        utilities.copy_file(file, copy, self.verbose)
+        original_file_path = self.scans_tsv_file
+        final_file_path = loris_bids_root_dir + '/sub-' + bids_sub_id + '/' + os.path.basename(self.scans_tsv_file)
+
+        # copy the scans.tsv file to the new directory
+        if os.path.exists(final_file_path):
+            lib.utilities.append_to_tsv_file(original_file_path, final_file_path, "filename", self.verbose)
+        else:
+            lib.utilities.copy_file(original_file_path, final_file_path, self.verbose)

         # determine the relative path and return it
-        relative_path = copy.replace(data_dir, "")
+        relative_path = final_file_path.replace(data_dir, "")

         return relative_path
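Both hunks above handle real-world scans.tsv content (#774): BIDS encodes missing values as the literal string 'n/a', which dateutil's parser rejects, and a scans.tsv may already exist at the destination, in which case new rows should be appended rather than the file overwritten. A minimal sketch of the acquisition-time guard, assuming the python-dateutil package (error handling simplified relative to the real method, which logs the error before exiting):

    from dateutil.parser import parse

    def get_acquisition_time(acq_time):
        # BIDS writes 'n/a' for missing values; parse() would raise on it
        if acq_time == 'n/a':
            return None
        try:
            return parse(acq_time)
        except ValueError:
            return None   # simplified: the real code logs the error and exits

    print(get_acquisition_time('2009-06-15T13:45:30'))   # 2009-06-15 13:45:30
    print(get_acquisition_time('n/a'))                   # None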
3 changes: 2 additions & 1 deletion python/lib/utilities.py
@@ -61,8 +61,9 @@ def append_to_tsv_file(new_tsv_file, old_tsv_file, key_value_check, verbose):
     # verify that the header rows of the two TSV file are the same
     new_tsv_content = read_tsv_file(new_tsv_file)
     old_tsv_content = read_tsv_file(old_tsv_file)
+    tsv_basename = os.path.basename(new_tsv_file)
     if new_tsv_content[0].keys() != old_tsv_content[0].keys():
-        print(f"ERROR: participants.tsv columns differ between {new_tsv_file} and {old_tsv_file}")
+        print(f"ERROR: {tsv_basename} columns differ between {new_tsv_file} and {old_tsv_file}")
         sys.exit(lib.exitcode.PROGRAM_EXECUTION_FAILURE)

     # loop through the rows of the new TSV file and check whether it is already present in the old TSV file
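For reference, the header comparison above works because read_tsv_file (not part of this diff) evidently returns each row as a dictionary keyed by column name, so comparing the key views of the first rows compares the headers. A self-contained sketch of that pattern using csv.DictReader; the real helper's implementation may differ:

    import csv
    import os

    def read_tsv_file(path):
        # assumed behaviour: one dict per row, keyed by the header columns
        with open(path, newline='') as fh:
            return list(csv.DictReader(fh, delimiter='\t'))

    def headers_match(new_tsv_file, old_tsv_file):
        new_rows = read_tsv_file(new_tsv_file)
        old_rows = read_tsv_file(old_tsv_file)
        if new_rows[0].keys() != old_rows[0].keys():
            tsv_basename = os.path.basename(new_tsv_file)
            print(f"ERROR: {tsv_basename} columns differ "
                  f"between {new_tsv_file} and {old_tsv_file}")
            return False
        return True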
