diff --git a/.eslintignore b/.eslintignore index 84e9701f..fb3be281 100644 --- a/.eslintignore +++ b/.eslintignore @@ -4,4 +4,5 @@ api/*.js api/*.js.map handler/convert.js ui/test/events/*.js -api/*.ts \ No newline at end of file +api/*.ts +handler/convert.* \ No newline at end of file diff --git a/handler/Dockerfile b/handler/Dockerfile index f86d0438..9f00090b 100755 --- a/handler/Dockerfile +++ b/handler/Dockerfile @@ -1,4 +1,4 @@ -FROM neurodebian:nd20.04-non-free +FROM neurodebian:nd22.04-non-free SHELL ["/bin/bash", "-c"] @@ -7,11 +7,29 @@ ENV DEBIAN_FRONTEND noninteractive RUN apt update && \ apt-get update && apt-get upgrade -y -RUN apt install -y parallel python3 python3-pip tree curl unzip git jq python libgl-dev python-numpy +# RUN apt install -y parallel python3 python3-pip tree curl unzip git jq libgl-dev python-numpy (orig) +RUN apt install -y parallel python3 python3-pip tree curl unzip git jq libgl-dev -RUN pip3 install numpy==1.23.0 nibabel==4.0.0 pandas matplotlib pyyaml==5.4.1 pydicom==2.3.1 natsort pydeface && \ +RUN pip3 install numpy nibabel pandas matplotlib pyyaml pydicom natsort pydeface && \ pip3 install quickshear mne mne-bids +# Install eye2bids for eyetracking +RUN git clone https://github.com/bids-standard/eye2bids.git && \ + cd eye2bids && \ + pip3 install . + +# Install edf2asc function from EyeLink Developers Kit (needed by eye2bids for EyeLink data) +RUN git clone https://github.com/dlevitas/edf2asc /tmp/edf2asc && \ + cp -r /tmp/edf2asc/edf2asc /usr/local/bin && \ + chmod +x /usr/local/bin/edf2asc +# RUN add-apt-repository universe && \ +# apt update && \ +# apt install ca-certificates && \ +# apt-key adv --fetch-keys https://apt.sr-research.com/SRResearch_key && \ +# add-apt-repository 'deb [arch=amd64] https://apt.sr-research.com SRResearch main' && \ +# apt update && \ +# apt install eyelink-display-software + # Install pypet2bids RUN git clone https://github.com/openneuropet/PET2BIDS && \ cd PET2BIDS && make installpoetry buildpackage installpackage @@ -21,8 +39,9 @@ RUN apt-get install -y build-essential pkg-config cmake git pigz rename zstd lib RUN touch /.pet2bidsconfig && chown 1001:1001 /.pet2bidsconfig -RUN echo "DEFAULT_METADATA_JSON=/usr/local/lib/python3.8/dist-packages/pypet2bids/template_json.json" > /.pet2bidsconfig +RUN echo "DEFAULT_METADATA_JSON=/usr/local/lib/python3.10/dist-packages/pypet2bids/template_json.json" > /.pet2bidsconfig +# Install FSL #this is the most tedious that's why it's one of the first layers installed #install fsl, and get rid of src # RUN wget https://fsl.fmrib.ox.ac.uk/fsldownloads/fslinstaller.py && \ @@ -76,10 +95,10 @@ RUN cd /tmp && curl -fLO https://github.com/rordenlab/dcm2niix/releases/latest/d # && make \ # && mv dcm2niix /usr/local/bin -# Get bids-specification from github +# Install bids-specification from github RUN cd && git clone https://github.com/bids-standard/bids-specification -#install ROBEX +# Install ROBEX ADD https://www.nitrc.org/frs/download.php/5994/ROBEXv12.linux64.tar.gz//?i_agree=1&download_now=1 / RUN tar -xzf /ROBEXv12.linux64.tar.gz ENV PATH /ROBEX:$PATH @@ -87,10 +106,10 @@ ENV PATH /ROBEX:$PATH ENV NODE_PATH $NVM_DIR/v$NODE_VERSION/lib/node_modules ENV PATH $NVM_DIR/v$NODE_VERSION/bin:$PATH -#install bids-validator +# Install bids-validator RUN npm install -g bids-validator@1.11.0 RUN git clone https://github.com/bids-standard/bids-validator -# install source code from local +# Install source code from local WORKDIR /app/handler RUN npm -g install pm2 diff --git 
a/handler/convert.js b/handler/convert.js index d01b5243..a7222429 100755 --- a/handler/convert.js +++ b/handler/convert.js @@ -260,6 +260,64 @@ async.forEachOf(info.objects, (o, idx, next_o) => { } }); } + function handleBeh() { + /* + - suffixes: + - beh + - events + - physio + - stim + + */ + if (suffix == "events") { + //we handle events a bit differently.. we need to generate events.tsv from items content + const events = o.items.find(o => !!o.eventsBIDS); + const headers = Object.keys(events.eventsBIDS[0]); //take first index value to see which columns user selected + events.content = headers.join("\t") + "\n"; + events.eventsBIDS.forEach(rec => { + if(rec.stim_file) { + if(!rec.stim_file.startsWith("/stimuli/")) { + rec.stim_file = "/stimuli/" + rec.stim_file + } + } + const row = []; + headers.forEach(key => { + row.push(rec[key]); + }); + events.content += row.join("\t") + "\n"; + }); + //add stuff to sidecar + const sidecar = o.items.find(o => o.name == "json"); + sidecar.sidecar.TaskName = o._entities.task; + sidecar.sidecar.trial_type = { + LongName: info.events.trialTypes.longName, + Description: info.events.trialTypes.desc, + Levels: info.events.trialTypes.levels, + }; + //now save + handleItem(events, "events.tsv"); + handleItem(sidecar, "events.json"); + } + else { + //normal beh stuff.. + o.items.forEach(item => { + switch (item.name) { + case "tsv": + handleItem(item, suffix + ".tsv"); + break; + case "tsv.gz": + handleItem(item, suffix + ".tsv.gz"); + break; + case "json": + item.sidecar.TaskName = o._entities.task; + handleItem(item, suffix + ".json"); + break; + default: + console.error("unknown beh item name", item.name); + } + }); + } + } function handleFunc() { /* - suffixes: @@ -304,6 +362,9 @@ async.forEachOf(info.objects, (o, idx, next_o) => { case "nii.gz": handleItem(item, suffix + ".nii.gz"); break; + case "tsv.gz": + handleItem(item, suffix + ".tsv.gz"); + break; case "json": //handle B0FieldIdentifier and B0FieldSource if present if(o.B0FieldIdentifier) { @@ -455,6 +516,9 @@ async.forEachOf(info.objects, (o, idx, next_o) => { case "anat": handleAnat(); break; + case "beh": + handleBeh(); + break; case "func": handleFunc(); break; diff --git a/handler/convert.ts b/handler/convert.ts index dac2add8..f6060a44 100755 --- a/handler/convert.ts +++ b/handler/convert.ts @@ -3,36 +3,42 @@ const fs = require('fs'); const mkdirp = require('mkdirp'); const async = require('async'); -const bidsEntitiesOrdered = require('../ui/src/assets/schema/rules/entities.json') +const bidsEntitiesOrdered = require('../ui/src/assets/schema/rules/entities.json'); //import { IObject, Subject, Session, OrganizedSession } from '../ui/src/store' const root = process.argv[2]; -if(!root) throw "please specify root directory"; +if (!root) throw 'please specify root directory'; -const info = JSON.parse(fs.readFileSync(root+"/finalized.json")); +const info = JSON.parse(fs.readFileSync(root + '/finalized.json')); //order the entityMappings correctly, as specified by the BIDS specification let newEntityOrdering = {}; -Object.values(bidsEntitiesOrdered).forEach(order=>{ - Object.keys(info.entityMappings).forEach(key=>{ - if(order == key) { - newEntityOrdering[key] = info.entityMappings[key] +Object.values(bidsEntitiesOrdered).forEach((order) => { + Object.keys(info.entityMappings).forEach((key) => { + if (order == key) { + newEntityOrdering[key] = info.entityMappings[key]; } - }) -}) -info.entityMappings = newEntityOrdering + }); +}); +info.entityMappings = newEntityOrdering; const datasetName = 
info.datasetDescription.Name; -mkdirp.sync(root+"/bids/"+datasetName); -fs.writeFileSync(root+"/bids/"+datasetName+"/finalized.json", JSON.stringify(info, null, 4)); //copy the finalized.json -fs.writeFileSync(root+"/bids/"+datasetName+"/dataset_description.json", JSON.stringify(info.datasetDescription, null, 4)); -fs.writeFileSync(root+"/bids/"+datasetName+"/.bidsignore", ` +mkdirp.sync(root + '/bids/' + datasetName); +fs.writeFileSync(root + '/bids/' + datasetName + '/finalized.json', JSON.stringify(info, null, 4)); //copy the finalized.json +fs.writeFileSync( + root + '/bids/' + datasetName + '/dataset_description.json', + JSON.stringify(info.datasetDescription, null, 4) +); +fs.writeFileSync( + root + '/bids/' + datasetName + '/.bidsignore', + ` **/excluded **/*_MP2RAGE.* *finalized.json -`); +` +); info.readme += ` @@ -41,94 +47,96 @@ info.readme += ` This dataset was converted from DICOM to BIDS using ezBIDS (https://brainlife.io/ezbids) `; -fs.writeFileSync(root+"/bids/"+datasetName+"/README", info.readme); -fs.writeFileSync(root+"/bids/"+datasetName+"/participants.json", JSON.stringify(info.participantsColumn, null, 4)); +fs.writeFileSync(root + '/bids/' + datasetName + '/README', info.readme); +fs.writeFileSync( + root + '/bids/' + datasetName + '/participants.json', + JSON.stringify(info.participantsColumn, null, 4) +); //convert participants.json to tsv -console.log("outputting participants.json/tsv"); -let keys = ["participant_id"]; -for(let key in info.participantsColumn) { +console.log('outputting participants.json/tsv'); +let keys = ['participant_id']; +for (let key in info.participantsColumn) { keys.push(key); } let tsv = []; let tsvheader = []; -for(let key of keys) { +for (let key of keys) { tsvheader.push(key); } tsv.push(tsvheader); -for(const subject_idx in info.participantInfo) { +for (const subject_idx in info.participantInfo) { const sub = info.subjects[subject_idx]; let tsvrec = []; - tsvrec.push("sub-"+sub.subject); - for(let key in info.participantsColumn) { - tsvrec.push(info.participantInfo[subject_idx][key]||'n/a'); + tsvrec.push('sub-' + sub.subject); + for (let key in info.participantsColumn) { + tsvrec.push(info.participantInfo[subject_idx][key] || 'n/a'); } tsv.push(tsvrec); } -let tsvf = fs.openSync(root+"/bids/"+datasetName+"/participants.tsv", "w"); -for(let rec of tsv) { - fs.writeSync(tsvf, rec.join("\t")+"\n"); +let tsvf = fs.openSync(root + '/bids/' + datasetName + '/participants.tsv', 'w'); +for (let rec of tsv) { + fs.writeSync(tsvf, rec.join('\t') + '\n'); } fs.closeSync(tsvf); //handle each objects -async.forEachOf(info.objects, (o, idx, next_o)=>{ - - if(o._type == "exclude" || o._exclude) { - o._type = "excluded/obj"+o.idx; +async.forEachOf(info.objects, (o, idx, next_o) => { + if (o._type == 'exclude' || o._exclude) { + o._type = 'excluded/obj' + o.idx; o._entities.description = o._SeriesDescription.replace(/[^0-9a-z]/gi, ''); //inject series desc to filename, remove non-alphanum chars } - let typeTokens = o._type.split("/"); + let typeTokens = o._type.split('/'); let modality = typeTokens[0]; //func, dwi, anat, etc.. 
(or exclude) let suffix = typeTokens[1]; //t1w, bold, or "objN" for exclude) //construct basename let tokens = []; //for(let k in o._entities) { - for(let k in info.entityMappings) { + for (let k in info.entityMappings) { const sk = info.entityMappings[k]; - if(o._entities[k]) { - tokens.push(sk+"-"+o._entities[k]); + if (o._entities[k]) { + tokens.push(sk + '-' + o._entities[k]); } } - if(o._exclude) { + if (o._exclude) { //excluded object doesn't have to be validated, so some of the item might collide.. //let's prevent it by setting some artificial tag - tokens.push("ezbids-"+idx); + tokens.push('ezbids-' + idx); } - const name = tokens.join("_"); + const name = tokens.join('_'); function composePath(derivatives) { - let path = "bids/"+datasetName; - if(derivatives) path += "/derivatives/"+derivatives; - path += "/sub-"+o._entities.subject; - if(o._entities.session) path += "/ses-"+o._entities.session; - path += "/"+modality; + let path = 'bids/' + datasetName; + if (derivatives) path += '/derivatives/' + derivatives; + path += '/sub-' + o._entities.subject; + if (o._entities.session) path += '/ses-' + o._entities.session; + path += '/' + modality; return path; } function handleItem(item, filename, derivatives = null) { const path = composePath(derivatives); - mkdirp.sync(root+"/"+path); + mkdirp.sync(root + '/' + path); //setup directory - let fullpath = root+"/"+path+"/"+name+"_"+filename; + let fullpath = root + '/' + path + '/' + name + '_' + filename; - if(item.name == "json") { + if (item.name == 'json') { //we create sidecar from sidecar object (edited by the user) item.content = JSON.stringify(item.sidecar, null, 4); } - if(item.content) { + if (item.content) { //if item has content to write, then use it instead of normal file fs.writeFileSync(fullpath, item.content); - } else{ + } else { //otherwise, assume to be normal files (link from the source) try { fs.lstatSync(fullpath); @@ -140,35 +148,35 @@ async.forEachOf(info.objects, (o, idx, next_o)=>{ //I need to use hardlink so that when archiver tries to create .zip in download API //the files will be found. 
As far as I know, archiver module can't de-reference //symlinks - fs.linkSync(root+"/"+item.path, fullpath); + fs.linkSync(root + '/' + item.path, fullpath); } } function handlePET() { - o.items.forEach(item => { + o.items.forEach((item) => { let derivatives = null; switch (item.name) { - case "nii.gz": - handleItem(item, suffix + ".nii.gz", derivatives); + case 'nii.gz': + handleItem(item, suffix + '.nii.gz', derivatives); break; - case "json": - handleItem(item, suffix + ".json", derivatives); + case 'json': + handleItem(item, suffix + '.json', derivatives); break; - case "tsv": - handleItem(item, suffix + ".tsv", derivatives); + case 'tsv': + handleItem(item, suffix + '.tsv', derivatives); break; default: - console.error("unknown PET item name", item.name); + console.error('unknown PET item name', item.name); } }); } function handlePerf() { - o.items.forEach(item => { + o.items.forEach((item) => { let derivatives = null; switch (item.name) { - case "nii.gz": - handleItem(item, suffix + ".nii.gz", derivatives); + case 'nii.gz': + handleItem(item, suffix + '.nii.gz', derivatives); break; - case "json": + case 'json': //handle IntendedFor if (o.IntendedFor) { item.sidecar.IntendedFor = []; @@ -180,31 +188,29 @@ async.forEachOf(info.objects, (o, idx, next_o)=>{ continue; } //if intended object is excluded, skip it - if (io._type == "exclude") continue; + if (io._type == 'exclude') continue; - const iomodality = io._type.split("/")[0]; - const suffix = io._type.split("/")[1]; + const iomodality = io._type.split('/')[0]; + const suffix = io._type.split('/')[1]; //construct a path relative to the subject - let path = ""; - if (io._entities.session) - path += "ses-" + io._entities.session + "/"; - path += iomodality + "/"; + let path = ''; + if (io._entities.session) path += 'ses-' + io._entities.session + '/'; + path += iomodality + '/'; let tokens = []; //for(let k in io._entities) { for (let k in info.entityMappings) { const sk = info.entityMappings[k]; - if (io._entities[k]) - tokens.push(sk + "-" + io._entities[k]); + if (io._entities[k]) tokens.push(sk + '-' + io._entities[k]); } - path += tokens.join("_"); - path += "_" + suffix + ".nii.gz"; //TODO - not sure if this is robust enough.. + path += tokens.join('_'); + path += '_' + suffix + '.nii.gz'; //TODO - not sure if this is robust enough.. 
item.sidecar.IntendedFor.push(path); } - } - handleItem(item, suffix + ".json", derivatives); + } + handleItem(item, suffix + '.json', derivatives); break; default: - console.error("unknown Perfusion item name", item.name); + console.error('unknown Perfusion item name', item.name); } }); } @@ -228,43 +234,43 @@ async.forEachOf(info.objects, (o, idx, next_o)=>{ */ //find manufacturer (used by UNIT1 derivatives) - let manufacturer = "UnknownManufacturer"; - o.items.forEach(item=>{ - if(item.sidecar && item.sidecar.Manufacturer) manufacturer = item.sidecar.Manufacturer; + let manufacturer = 'UnknownManufacturer'; + o.items.forEach((item) => { + if (item.sidecar && item.sidecar.Manufacturer) manufacturer = item.sidecar.Manufacturer; }); - o.items.forEach(item=>{ + o.items.forEach((item) => { let derivatives = null; - if(suffix == "UNIT1") derivatives = manufacturer; + if (suffix == 'UNIT1') derivatives = manufacturer; - switch(item.name) { - case "nii.gz": - if(o.defaced && o.defaceSelection == "defaced") { - item.path = item.path+".defaced.nii.gz"; - console.log("using defaced version of t1w", item.path); - } - handleItem(item, suffix+".nii.gz", derivatives); - break; - case "json": - //handle B0FieldIdentifier and B0FieldSource if present - if(o.B0FieldIdentifier) { - if(o.B0FieldIdentifier.length > 1) { - item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier) - }else{ - item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0] + switch (item.name) { + case 'nii.gz': + if (o.defaced && o.defaceSelection == 'defaced') { + item.path = item.path + '.defaced.nii.gz'; + console.log('using defaced version of t1w', item.path); } - } - if(o.B0FieldSource) { - if(o.B0FieldSource.length > 1) { - item.sidecar.B0FieldSource = Object.values(o.B0FieldSource) - }else{ - item.sidecar.B0FieldSource = o.B0FieldSource[0] + handleItem(item, suffix + '.nii.gz', derivatives); + break; + case 'json': + //handle B0FieldIdentifier and B0FieldSource if present + if (o.B0FieldIdentifier) { + if (o.B0FieldIdentifier.length > 1) { + item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier); + } else { + item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0]; + } } - } - handleItem(item, suffix+".json", derivatives); - break; - default: - console.error("unknown anat item name", item.name); + if (o.B0FieldSource) { + if (o.B0FieldSource.length > 1) { + item.sidecar.B0FieldSource = Object.values(o.B0FieldSource); + } else { + item.sidecar.B0FieldSource = o.B0FieldSource[0]; + } + } + handleItem(item, suffix + '.json', derivatives); + break; + default: + console.error('unknown anat item name', item.name); } }); } @@ -277,65 +283,67 @@ async.forEachOf(info.objects, (o, idx, next_o)=>{ - sbref */ - if(suffix == "events") { + if (suffix == 'events') { //we handle events a bit differently.. 
we need to generate events.tsv from items content - const events = o.items.find(o=>!!o.eventsBIDS); - const headers = Object.keys(events.eventsBIDS[0]) //take first index value to see which columns user selected - events.content = headers.join("\t")+"\n"; - events.eventsBIDS.forEach(rec=>{ - if(rec.stim_file) { - if(!rec.stim_file.startsWith("/stimuli/")) { - rec.stim_file = "/stimuli/" + rec.stim_file + const events = o.items.find((o) => !!o.eventsBIDS); + const headers = Object.keys(events.eventsBIDS[0]); //take first index value to see which columns user selected + events.content = headers.join('\t') + '\n'; + events.eventsBIDS.forEach((rec) => { + if (rec.stim_file) { + if (!rec.stim_file.startsWith('/stimuli/')) { + rec.stim_file = '/stimuli/' + rec.stim_file; } } const row = []; - headers.forEach(key=>{ + headers.forEach((key) => { row.push(rec[key]); }); - events.content += row.join("\t")+"\n"; + events.content += row.join('\t') + '\n'; }); //add stuff to sidecar - const sidecar = o.items.find(o=>o.name == "json"); + const sidecar = o.items.find((o) => o.name == 'json'); //sidecar.sidecar.TaskName = o._entities.task; sidecar.sidecar.trial_type = { LongName: info.events.trialTypes.longName, Description: info.events.trialTypes.desc, Levels: info.events.trialTypes.levels, - } + }; //now save - handleItem(events, "events.tsv"); - handleItem(sidecar, "events.json"); + handleItem(events, 'events.tsv'); + handleItem(sidecar, 'events.json'); } else { - //normal func stuff.. - o.items.forEach(item=>{ - switch(item.name) { - case "nii.gz": - handleItem(item, suffix+".nii.gz"); - break; - case "json": - //handle B0FieldIdentifier and B0FieldSource if present - if(o.B0FieldIdentifier.length) { - if(o.B0FieldIdentifier.length > 1) { - item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier) - }else{ - item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0] + o.items.forEach((item) => { + switch (item.name) { + case 'nii.gz': + handleItem(item, suffix + '.nii.gz'); + break; + case 'tsv.gz': + handleItem(item, suffix + '.tsv.gz'); + break; + case 'json': + //handle B0FieldIdentifier and B0FieldSource if present + if (o.B0FieldIdentifier.length) { + if (o.B0FieldIdentifier.length > 1) { + item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier); + } else { + item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0]; + } } - } - if(o.B0FieldSource.length) { - if(o.B0FieldSource.length > 1) { - item.sidecar.B0FieldSource = Object.values(o.B0FieldSource) - }else{ - item.sidecar.B0FieldSource = o.B0FieldSource[0] + if (o.B0FieldSource.length) { + if (o.B0FieldSource.length > 1) { + item.sidecar.B0FieldSource = Object.values(o.B0FieldSource); + } else { + item.sidecar.B0FieldSource = o.B0FieldSource[0]; + } } - } - item.sidecar.TaskName = o._entities.task; - handleItem(item, suffix+".json"); - break; - default: - console.error("unknown func item name", item.name); + item.sidecar.TaskName = o._entities.task; + handleItem(item, suffix + '.json'); + break; + default: + console.error('unknown func item name', item.name); } }); } @@ -352,150 +360,150 @@ async.forEachOf(info.objects, (o, idx, next_o)=>{ - magnitude - fieldmap */ - o.items.forEach(item=>{ - switch(item.name) { - case "nii.gz": - handleItem(item, suffix+".nii.gz"); - break; - case "json": - //handle B0FieldIdentifier and B0FieldSource if present - if(o.B0FieldIdentifier.length) { - if(o.B0FieldIdentifier.length > 1) { - item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier) - }else{ - 
item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0] - } - } - if(o.B0FieldSource.length) { - if(o.B0FieldSource.length > 1) { - item.sidecar.B0FieldSource = Object.values(o.B0FieldSource) - }else{ - item.sidecar.B0FieldSource = o.B0FieldSource[0] + o.items.forEach((item) => { + switch (item.name) { + case 'nii.gz': + handleItem(item, suffix + '.nii.gz'); + break; + case 'json': + //handle B0FieldIdentifier and B0FieldSource if present + if (o.B0FieldIdentifier.length) { + if (o.B0FieldIdentifier.length > 1) { + item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier); + } else { + item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0]; + } } - } - //handle IntendedFor - if(o.IntendedFor) { - item.sidecar.IntendedFor = []; - for(let idx of o.IntendedFor) { - const io = info.objects[idx]; - - //this should not happen, but ezBIDS_core.json could be corrupted.. - if(!io) { - console.error("can't find object with ", idx); - continue; + if (o.B0FieldSource.length) { + if (o.B0FieldSource.length > 1) { + item.sidecar.B0FieldSource = Object.values(o.B0FieldSource); + } else { + item.sidecar.B0FieldSource = o.B0FieldSource[0]; } + } + //handle IntendedFor + if (o.IntendedFor) { + item.sidecar.IntendedFor = []; + for (let idx of o.IntendedFor) { + const io = info.objects[idx]; - //if intended object is excluded, skip it - if(io._type == "exclude") continue; - - const iomodality = io._type.split("/")[0]; - const suffix = io._type.split("/")[1]; - - //construct a path relative to the subject - let path = ""; - if(io._entities.session) path += "ses-"+io._entities.session+"/"; - path += iomodality+"/"; - let tokens = []; - //for(let k in io._entities) { - for(let k in info.entityMappings) { - const sk = info.entityMappings[k]; - if(io._entities[k]) tokens.push(sk+"-"+io._entities[k]); - } - path += tokens.join("_"); - path += "_"+suffix+".nii.gz"; //TODO - not sure if this is robust enough.. + //this should not happen, but ezBIDS_core.json could be corrupted.. + if (!io) { + console.error("can't find object with ", idx); + continue; + } + + //if intended object is excluded, skip it + if (io._type == 'exclude') continue; + + const iomodality = io._type.split('/')[0]; + const suffix = io._type.split('/')[1]; + + //construct a path relative to the subject + let path = ''; + if (io._entities.session) path += 'ses-' + io._entities.session + '/'; + path += iomodality + '/'; + let tokens = []; + //for(let k in io._entities) { + for (let k in info.entityMappings) { + const sk = info.entityMappings[k]; + if (io._entities[k]) tokens.push(sk + '-' + io._entities[k]); + } + path += tokens.join('_'); + path += '_' + suffix + '.nii.gz'; //TODO - not sure if this is robust enough.. 
- item.sidecar.IntendedFor.push(path); + item.sidecar.IntendedFor.push(path); + } } - } - handleItem(item, suffix+".json"); - break; - default: - console.error("unknown fmap item name", item.name); + handleItem(item, suffix + '.json'); + break; + default: + console.error('unknown fmap item name', item.name); } }); } function handleDwi() { - o.items.forEach(item=>{ - switch(item.name) { - case "nii.gz": - handleItem(item, "dwi.nii.gz"); - break; - case "bvec": - handleItem(item, "dwi.bvec"); - break; - case "bval": - handleItem(item, "dwi.bval"); - break; - case "json": - //handle B0FieldIdentifier and B0FieldSource if present - if(o.B0FieldIdentifier.length) { - if(o.B0FieldIdentifier.length > 1) { - item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier) - }else{ - item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0] + o.items.forEach((item) => { + switch (item.name) { + case 'nii.gz': + handleItem(item, 'dwi.nii.gz'); + break; + case 'bvec': + handleItem(item, 'dwi.bvec'); + break; + case 'bval': + handleItem(item, 'dwi.bval'); + break; + case 'json': + //handle B0FieldIdentifier and B0FieldSource if present + if (o.B0FieldIdentifier.length) { + if (o.B0FieldIdentifier.length > 1) { + item.sidecar.B0FieldIdentifier = Object.values(o.B0FieldIdentifier); + } else { + item.sidecar.B0FieldIdentifier = o.B0FieldIdentifier[0]; + } } - } - if(o.B0FieldSource.length) { - if(o.B0FieldSource.length > 1) { - item.sidecar.B0FieldSource = Object.values(o.B0FieldSource) - }else{ - item.sidecar.B0FieldSource = o.B0FieldSource[0] + if (o.B0FieldSource.length) { + if (o.B0FieldSource.length > 1) { + item.sidecar.B0FieldSource = Object.values(o.B0FieldSource); + } else { + item.sidecar.B0FieldSource = o.B0FieldSource[0]; + } } - } - handleItem(item, "dwi.json"); - break; - default: - console.error("unknown dwi item name", item.name); + handleItem(item, 'dwi.json'); + break; + default: + console.error('unknown dwi item name', item.name); } }); - if(!o.items.find(item=>item.name == "bvec")) { - console.log("bvec is missing.. assuming that this is b0, and setup empty bvec/bval"); + if (!o.items.find((item) => item.name == 'bvec')) { + console.log('bvec is missing.. 
assuming that this is b0, and setup empty bvec/bval'); const path = composePath(false); const zeros = []; - for(let j = 0;j < o.analysisResults.NumVolumes; ++j) { + for (let j = 0; j < o.analysisResults.NumVolumes; ++j) { zeros.push(0); } - const bvec = `${zeros.join(" ")}\n${zeros.join(" ")}\n${zeros.join(" ")}\n`; - fs.writeFileSync(root+"/"+path+"/"+name+"_dwi.bvec", bvec); + const bvec = `${zeros.join(' ')}\n${zeros.join(' ')}\n${zeros.join(' ')}\n`; + fs.writeFileSync(root + '/' + path + '/' + name + '_dwi.bvec', bvec); - const bval = zeros.join(" ")+"\n"; - fs.writeFileSync(root+"/"+path+"/"+name+"_dwi.bval", bval); + const bval = zeros.join(' ') + '\n'; + fs.writeFileSync(root + '/' + path + '/' + name + '_dwi.bval', bval); } } //now handle different modality - switch(modality) { - case "anat": - handleAnat(); - break; - case "func": - handleFunc(); - break; - case "fmap": - handleFmap(); - break; - case "dwi": - handleDwi(); - break; - case "perf": - handlePerf(); - break; - case "pet": - handlePET(); - break; - case "excluded": - if(!info.includeExcluded) break; - o.items.forEach((item, idx)=>{ - //sub-OpenSciJan22_desc-localizer_obj5-0.json - handleItem(item, "excluded."+item.name); - }); - break; + switch (modality) { + case 'anat': + handleAnat(); + break; + case 'func': + handleFunc(); + break; + case 'fmap': + handleFmap(); + break; + case 'dwi': + handleDwi(); + break; + case 'perf': + handlePerf(); + break; + case 'pet': + handlePET(); + break; + case 'excluded': + if (!info.includeExcluded) break; + o.items.forEach((item, idx) => { + //sub-OpenSciJan22_desc-localizer_obj5-0.json + handleItem(item, 'excluded.' + item.name); + }); + break; - default: - console.error("unknown datatype:"+o._type); + default: + console.error('unknown datatype:' + o._type); } next_o(); }); diff --git a/handler/ezBIDS_core/createThumbnailsMovies.py b/handler/ezBIDS_core/createThumbnailsMovies.py index 42e88deb..ad07ea1e 100755 --- a/handler/ezBIDS_core/createThumbnailsMovies.py +++ b/handler/ezBIDS_core/createThumbnailsMovies.py @@ -162,7 +162,7 @@ def create_DWIshell_thumbnails(img_file, image, bval_file): print("") create_MEG_thumbnail(img_file) else: - if not img_file.endswith('blood.json'): + if not img_file.endswith(('blood.json', '_physio.tsv.gz', '_physioevents.tsv.gz')): output_dir = img_file.split(".nii.gz")[0] image = nib.load(img_file) diff --git a/handler/ezBIDS_core/ezBIDS_core.py b/handler/ezBIDS_core/ezBIDS_core.py index d3576bbf..05fd5aa9 100755 --- a/handler/ezBIDS_core/ezBIDS_core.py +++ b/handler/ezBIDS_core/ezBIDS_core.py @@ -41,7 +41,7 @@ cog_atlas_url = "http://cognitiveatlas.org/api/v-alpha/task" -accepted_datatypes = ["anat", "dwi", "fmap", "func", "perf", "pet", "meg"] # Will add others later +accepted_datatypes = ["anat", "dwi", "fmap", "func", "meg", "perf", "pet", "beh"] # Will add others later MEG_extensions = [".ds", ".fif", ".sqd", ".con", ".raw", ".ave", ".mrk", ".kdf", ".mhd", ".trg", ".chn", ".dat"] @@ -310,6 +310,9 @@ def fix_multiple_dots(uploaded_img_list): elif img_file.endswith('.v.gz') and img_file.count('.') > 2: # ECAT-formatted PET fix = True ext = '.v.gz' + elif img_file.endswith('.tsv.gz') and img_file.count('.') > 2: # Eyetracking + fix = True + ext = '.tsv.gz' elif img_file.endswith('.json') and img_file.count('.') > 1: # for PET blood fix = True ext = '.json' @@ -339,6 +342,8 @@ def fix_multiple_dots(uploaded_img_list): ext = '.nii.gz' elif typo.endswith('.v.gz'): ext = '.v.gz' + elif typo.endswith('.tsv.gz'): + ext = '.tsv.gz' else: ext = '.' 
+ typo.split('.')[-1] @@ -492,10 +497,13 @@ def modify_uploaded_dataset_list(uploaded_img_list): ext = '.v.gz' elif img_file.endswith('.ds'): ext = '.ds' + elif img_file.endswith('.tsv.gz'): + ext = '.tsv.gz' else: ext = Path(img_file).suffix - if not img_file.endswith(tuple(MEG_extensions)) and not img_file.endswith('blood.json'): + if (not img_file.endswith(tuple(MEG_extensions)) and not img_file.endswith('blood.json') + and not img_file.endswith('.tsv.gz')): try: nib.load(img_file) except: @@ -515,7 +523,7 @@ def modify_uploaded_dataset_list(uploaded_img_list): elif any(x.endswith(tuple(['.v', '.v.gz'])) for x in grouped_files): grouped_files = [x for x in grouped_files if not x.endswith(tuple(['.v', '.v.gz']))] - # Don't want this section is we're allowing only NIfTI files to be uploaded (group length will only be 1). + # Don't want this section if we're allowing only NIfTI files to be uploaded (group length will only be 1). # # If imaging file comes with additional data (JSON, bval/bvec) add them to list for processing # if len(grouped_files) > 1: # uploaded_files_list.append(grouped_files) @@ -914,11 +922,13 @@ def generate_dataset_list(uploaded_files_list, exclude_data): # Create list for appending dictionaries to dataset_list = [] - # Get separate nifti and json (i.e. sidecar) lists + # Get separate data (e.g. nifti) and json (i.e. sidecar) lists img_list = natsorted( [ - x for x in uploaded_files_list if x.endswith('nii.gz') + x for x in uploaded_files_list + if x.endswith('nii.gz') or x.endswith('blood.json') + or x.endswith('.tsv.gz') or x.endswith(tuple(MEG_extensions)) ] ) @@ -945,6 +955,8 @@ def generate_dataset_list(uploaded_files_list, exclude_data): ext = '.v.gz' elif img_file.endswith('.ds'): ext = '.ds' + elif img_file.endswith('.tsv.gz'): + ext = '.tsv.gz' else: ext = Path(img_file).suffix @@ -959,6 +971,9 @@ def generate_dataset_list(uploaded_files_list, exclude_data): json_path = corresponding_json[0] json_data = open(corresponding_json[0]) json_data = json.load(json_data, strict=False) + if ext == '.tsv.gz': + from mne_bids.sidecar_updates import _update_sidecar + _update_sidecar(json_path, "ConversionSoftware", "n/a") else: json_path = img_file.split(ext)[0] + '.json' json_data = { @@ -1233,9 +1248,9 @@ def generate_dataset_list(uploaded_files_list, exclude_data): if not os.path.exists(json_path): with open(json_path, "w") as fp: json.dump(json_data, fp, indent=3) - corresponding_files_list = corresponding_files_list + [json_path] json_data = open(json_path) json_data = json.load(json_data, strict=False) + corresponding_files_list = corresponding_files_list + [json_path] # Files (JSON, bval/bvec, tsv) associated with imaging file corresponding_file_paths = [ @@ -1877,16 +1892,18 @@ def create_lookup_info(): suffixes = [x for x in suffixes if x not in ["m0scan"]] elif datatype == "func": # Remove non-imaging suffixes - suffixes = [x for x in suffixes if x not in ["events", "stim", "physio", "phase"]] + suffixes = [x for x in suffixes if x not in ["events", "stim", "phase"]] elif datatype == "perf": # Remove non-imaging suffixes - suffixes = [x for x in suffixes if x not in ["aslcontext", "asllabeling", "physio", "stim"]] + suffixes = [x for x in suffixes if x not in ["aslcontext", "asllabeling", "stim"]] elif datatype == "pet": # Only keep imaging suffixes suffixes = [x for x in suffixes if x in ["pet", "blood"]] elif datatype == "meg": # MEG files are weird, can have calibration and crosstalk files with the same datatype/suffix pair suffixes = [x for x in suffixes 
if x == "meg" and key == "meg"] + elif datatype == "beh": + suffixes = [x for x in suffixes if x not in ["stim"]] for suffix in suffixes: @@ -2367,6 +2384,11 @@ def datatype_suffix_identification(dataset_list_unique_series, lookup_dic, confi "because the file path ends with '_blood.json. " \ "Please modify if incorrect." + if json_path.endswith('_physio.tsv.gz'): + unique_dic["suffix"] = 'physio' + if json_path.endswith('_physioevents.tsv.gz'): + unique_dic["suffix"] = 'physioevents' + """ If no luck with the json paths, and assuming an ezBIDS configuration file wasn't provided, try discerning datatype and suffix with dcm2niix's BidsGuess. And if that doesn't produce anything, try with search terms @@ -2977,6 +2999,13 @@ def modify_objects_info(dataset_list): "name": name, "pngPaths": [], "headers": protocol["headers"]}) + elif item.endswith('tsv.gz'): + items.append({ + "path": item, + "name": 'tsv.gz', + "pngPaths": [], + "headers": protocol["headers"] + }) # Objects-level info for ezBIDS_core.json objects_info = { @@ -3111,6 +3140,7 @@ def check_dwi_b0maps(dataset_list_unique_series): # Create the dataset list of dictionaries dataset_list = generate_dataset_list(uploaded_files_list, exclude_data) + # Get pesudo subject (and session) info dataset_list = organize_dataset(dataset_list) @@ -3120,6 +3150,7 @@ def check_dwi_b0maps(dataset_list_unique_series): # Make a new list containing the dictionaries of only unique dataset acquisitions dataset_list, dataset_list_unique_series = determine_unique_series(dataset_list, bids_compliant) + # If ezBIDS configuration file detected in upload, use that for datatype, suffix, and entity identifications if config is True: readme, dataset_description_dic, participants_column_info, dataset_list_unique_series, subs_information, events, \ diff --git a/handler/find_img_data.py b/handler/find_img_data.py index 914364b6..0c02eecf 100755 --- a/handler/find_img_data.py +++ b/handler/find_img_data.py @@ -59,6 +59,7 @@ def find_img_data(dir): pet_ecat_files_list = [] pet_dcm_dirs_list = [] meg_data_list = [] +eyetracking_data_list = [] find_img_data('.') @@ -99,7 +100,14 @@ def find_img_data(dir): # TODO - won't this remove different extensions? meg_data_list = [x for x in meg_data_list[0].split('\n') if x != '' and 'hz.ds' not in x] -# Save the MRI, PET, MEG, and NIfTI lists (if they exist) to separate files +# Eyetracking +eye_extensions = ['*.edf'] # Add as more become known +for eye_ext in eye_extensions: + find_cmd = os.popen(f"find . -maxdepth 9 -type f -name '{eye_ext}'").read() + if find_cmd != '': + eyetracking_data_list.append(find_cmd) + +# Save the MRI, PET, MEG, and Eyetracking lists (if they exist) to separate files file = open(f'{root}/dcm2niix.list', 'w') if len(mri_dcm_dirs_list): for dcm in mri_dcm_dirs_list: @@ -123,3 +131,31 @@ def find_img_data(dir): for meg in meg_data_list: file.write(meg + '\n') file.close() + +if len(eyetracking_data_list): + file = open(f'{root}/eyetracking.list', 'w') + metadata = [] + find_metadata_cmd = os.popen("find . 
-maxdepth 9 -type f -name metadata.yml").read() + if find_metadata_cmd != '': + metadata = [x for x in find_metadata_cmd.split('\n') if x != ''] # one metadata.yml path per entry + output_counter = 1 + for eye in eyetracking_data_list: + eye = eye.strip() + data_dir = os.path.dirname(eye) + output_dir = f"{data_dir}/output/{output_counter}" + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + try: + if len(metadata): + metadata_file = metadata[-1] # keep the metadata list intact across loop iterations + os.system(f"eye2bids --input_file {eye} --output_dir {output_dir} --metadata_file {metadata_file}") + else: + os.system(f"eye2bids --input_file {eye} --output_dir {output_dir} --force") + output_data_files = [f'{output_dir}/{x}' for x in os.listdir(output_dir) if x.endswith('.tsv.gz')] + if len(output_data_files): + for o in output_data_files: + file.write(o + '\n') + except: + print('Could not convert eyetracking .edf format data.') + output_counter += 1 + file.close() diff --git a/handler/preprocess.sh b/handler/preprocess.sh index b1b8af37..987deae0 100755 --- a/handler/preprocess.sh +++ b/handler/preprocess.sh @@ -22,9 +22,12 @@ echo "running preprocess.sh on root folder ${root}" echo "running expand.sh" ./expand.sh $root -echo "replace file paths that contain space, quotation, or [@^()] characters" +echo "Replace file paths that contain space, quotation, or [@^()] characters" find "$root" -depth -name "*[ @^()]*" -print0 | sort -rz | xargs -0 -n 1 -I {} ./rename_special_chars.sh {} +echo "Additionally, ensure file extensions do not have capital letters in them (e.g. .NII.GZ --> .nii.gz)" +find "$root" -depth -type f -name '*.[A-Z]*' -exec rename 's/\.([A-Z]+)$/.\L$1/' {} \; + # check to see if uploaded data is a BIDS-compliant dataset echo "Running bids-validator to check BIDS compliance" @@ -243,6 +246,10 @@ else cat $root/meg.list >> $root/list fi + if [ -f $root/eyetracking.list ]; then + cat $root/eyetracking.list >> $root/list + fi + if [ !
-s $root/list ]; then err_file='' if [ `grep 'Error' $root/dcm2niix_error | wc -l` -ne 0 ]; then diff --git a/ui/src/Objects.vue b/ui/src/Objects.vue index 97befa3a..e5eba0f9 100755 --- a/ui/src/Objects.vue +++ b/ui/src/Objects.vue @@ -296,7 +296,8 @@ so._type.startsWith('fmap') || so._type.startsWith('dwi') || so._type.startsWith('anat') || - so._type.startsWith('meg') + so._type.startsWith('meg') || + so._type.startsWith('beh') " label="Relevant Metadata" > @@ -395,6 +396,7 @@ import datatype from './components/datatype.vue'; import ModalityForm from './components/modalityForm.vue'; import anatYaml from '../src/assets/schema/rules/sidecars/anat.yaml'; +import behYaml from '../src/assets/schema/rules/sidecars/beh.yaml'; import funcYaml from '../src/assets/schema/rules/sidecars/func.yaml'; import fmapYaml from '../src/assets/schema/rules/sidecars/fmap.yaml'; import dwiYaml from '../src/assets/schema/rules/sidecars/dwi.yaml'; diff --git a/ui/src/SeriesPage.vue b/ui/src/SeriesPage.vue index 04ff3cc6..4fdaf2c3 100755 --- a/ui/src/SeriesPage.vue +++ b/ui/src/SeriesPage.vue @@ -238,7 +238,8 @@ ss.type.startsWith('fmap') || ss.type.startsWith('dwi') || ss.type.startsWith('anat') || - ss.type.startsWith('meg') + ss.type.startsWith('meg') || + ss.type.startsWith('beh') " label="Relevant Metadata" > @@ -315,6 +316,7 @@ import { Series, IObject, IEzbids } from './store'; import { validateEntities, validate_B0FieldIdentifier_B0FieldSource, metadataAlerts } from './libUnsafe'; import anatYaml from '../src/assets/schema/rules/sidecars/anat.yaml'; +import behYaml from '../src/assets/schema/rules/sidecars/beh.yaml'; import funcYaml from '../src/assets/schema/rules/sidecars/func.yaml'; import fmapYaml from '../src/assets/schema/rules/sidecars/fmap.yaml'; import dwiYaml from '../src/assets/schema/rules/sidecars/dwi.yaml'; @@ -420,6 +422,8 @@ export default defineComponent({ bidsDatatypeMetadata = anatYaml; } else if (s.type.startsWith('meg')) { bidsDatatypeMetadata = megYaml; + } else if (s.type.startsWith('beh')) { + bidsDatatypeMetadata = behYaml; } const metadataAlertsFields = metadataAlerts( @@ -432,9 +436,9 @@ export default defineComponent({ // console.log(s.series_idx, s.type); // console.log('metadataAlertsFields', metadataAlertsFields); if (metadataAlertsFields.length) { - let warn: string = `'Required metadata is missing, provided metadata field values have improper + let warn: string = `Required metadata is missing or provided metadata field values have improper format. Please click on the "Edit Metadata" button below to resolve. 
You may skip fields for which you - do not know the proper value, but you will not have a fully BIDS-compliant dataset.'`; + do not know the proper value, but you will not have a fully BIDS-compliant dataset.`; s.validationWarnings.push(warn); } diff --git a/ui/src/assets/schema/rules/sidecars/beh.yaml b/ui/src/assets/schema/rules/sidecars/beh.yaml new file mode 100644 index 00000000..0cd73e87 --- /dev/null +++ b/ui/src/assets/schema/rules/sidecars/beh.yaml @@ -0,0 +1,27 @@ +# +# Groups of related metadata fields +# +# Assumptions: never need disjunction of selectors +# Assumptions: top-to-bottom overrides is sufficient logic + +--- +# Metadata for either beh or events files +BEHTaskInformation: + selectors: + - datatype == "beh" + - intersects([suffix], ["beh", "events"]) + fields: + TaskName: recommended + Instructions: recommended + TaskDescription: recommended + CogAtlasID: recommended + CogPOID: recommended + +BEHInstitutionInformation: + selectors: + - datatype == "beh" + - intersects([suffix], ["beh", "events"]) + fields: + InstitutionName: recommended + InstitutionAddress: recommended + InstitutionalDepartmentName: recommended \ No newline at end of file diff --git a/ui/src/components/modalityForm.vue b/ui/src/components/modalityForm.vue index 4948c129..58e8f738 100644 --- a/ui/src/components/modalityForm.vue +++ b/ui/src/components/modalityForm.vue @@ -577,6 +577,7 @@