Commit a09416af authored by Fidel Alfaro Almagro

Fixing small bugs

parent 0527049f
Pipeline #17769 passed
@@ -45,15 +45,15 @@ class Context(Config):
         env = {'BIPDIR' : bip.BIPDIR,
                'FSLDIR' : os.environ['FSLDIR']}
-        self.subject = subject
-        self.BIPDIR = env['BIPDIR']
-        self.FSLDIR = env['FSLDIR']
         if not cfgdir:
             cfgdir = self.get_data('config/config.toml')
         Config.__init__(self, cfgdir=cfgdir, env=env)
+        self.subject = subject
+        self.BIPDIR = env['BIPDIR']
+        self.FSLDIR = env['FSLDIR']
         with open(self.get_data('dMRI/autoptx/struct_list.json'), 'r',
                   encoding="utf-8") as f:
             self.tract_struct = json.load(f)
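
This hunk moves the `subject`/`BIPDIR`/`FSLDIR` assignments below the `Config.__init__` call. A minimal sketch of the failure mode this presumably fixes, assuming `Config.__init__` (re)populates instance attributes from the parsed config:

    class Config:
        def __init__(self):
            # Stand-in for the real initialiser: anything assigned on the
            # instance *before* this call is clobbered by the defaults.
            self.__dict__.update({'subject': None})

    class Before(Config):
        def __init__(self, subject):
            self.subject = subject    # set too early...
            Config.__init__(self)     # ...and overwritten here

    class After(Config):
        def __init__(self, subject):
            Config.__init__(self)
            self.subject = subject    # survives

    print(Before('1234567').subject)  # None
    print(After('1234567').subject)   # 1234567
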
@@ -66,18 +66,20 @@ class Context(Config):
     def MNI_brain_mask(self):
         return self.get_standard('MNI152_T1_1mm_brain_mask')

-    def get_standard(self, fileName):
-        fileName.replace('/', os.sep)
+    def get_standard(self, file_name):
+        file_name = file_name.replace('/', os.sep)
         basedir = self.FSLDIR + os.sep + op.join('data', 'standard') + os.sep
-        return basedir + fileName
+        return basedir + file_name

-    def get_atlas(self, fileName):
-        fileName.replace('/', os.sep)
+    def get_atlas(self, file_name):
+        file_name = file_name.replace('/', os.sep)
         basedir = self.FSLDIR + os.sep + op.join('data', 'atlases') + os.sep
-        return basedir + fileName
+        return basedir + file_name

-    def get_data(self, fileName):
-        return bip.utils.get_data(fileName)
+    @staticmethod
+    def get_data(file_name):
+        file_name = file_name.replace('/', os.sep)
+        return bip.utils.get_data(file_name)
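
The core bug fixed here: `str.replace` returns a new string rather than modifying in place, so the old `fileName.replace('/', os.sep)` was a no-op. A quick illustration:

    import os

    p = 'dMRI/TBSS/JHU-ICBM-labels-1mm.nii.gz'
    p.replace('/', os.sep)      # result discarded: strings are immutable
    p = p.replace('/', os.sep)  # the fixed pattern rebinds the result

    # On POSIX os.sep is '/', so the no-op went unnoticed there; rebinding
    # makes the separator conversion actually take effect on Windows.
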
 def parseArguments():
@@ -87,7 +89,10 @@ def parseArguments():
     #########################
     parser = MyParser(description='BioBank Pipeline Manager V. 2.0')
     parser.add_argument("subjectFolder", help='Subject Folder', action="store")
-    parser.add_argument("configDir", help='configDir', action="store")
+    parser.add_argument("-c", "--configDir",
+                        help='Configuration folder: ' +
+                             'A config.toml file must exist.',
+                        action="store", default=bip.utils.get_data('config'))

     return parser.parse_args()
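
This turns `configDir` from a required positional argument into an optional `-c/--configDir` flag with a default. The behaviour, sketched with a hypothetical default path:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("subjectFolder", action="store")
    parser.add_argument("-c", "--configDir", action="store",
                        default="/opt/bip/data/config")  # hypothetical default

    print(parser.parse_args(["sub1"]).configDir)              # /opt/bip/data/config
    print(parser.parse_args(["sub1", "-c", "cfg"]).configDir) # cfg
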
@@ -51,7 +51,8 @@ def run(ctx,
         seed_mult = ctx.get('dMRI_multiplier_autoPtx_seeds', 300)
         t_seeds = int(ctx.tract_struct[t_name]["num_seeds"] * seed_mult)
-        orig_tract_dir = ctx.get_data('dMRI/autoptx/protocols/' + t_name + '/')
+        orig_tract_dir = ctx.get_data('dMRI/autoptx/protocols/' + t_name) + \
+                         op.sep
         FMRIB58_FA_1mm = ctx.get_standard('FMRIB58_FA_1mm.nii.gz')

         # Does the protocol define a second
@@ -139,7 +139,7 @@ def run(ctx,
         wrappers.fslmaths(TBSS_all_FA).mas(TBSS_mean_FA_skeleton_mask).\
             run(TBSS_all_FA_skeletonised)
-        atlas = ctx.get_data('/dMRI/TBSS/JHU-ICBM-labels-1mm.nii.gz')
+        atlas = ctx.get_data('dMRI/TBSS/JHU-ICBM-labels-1mm.nii.gz')
         mean = wrappers.fslstats(TBSS_all_FA_skeletonised, K=atlas).M.run()

         # TODO: Funnily enough, this atlas has changed over time, so for v2
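
The leading '/' is dropped here (and in the netmats hunk below) because `get_data` now routes through `os.path.join`, which discards everything before an absolute component:

    import os.path as op

    base = '/opt/bip/data'  # hypothetical BIPDIR/data
    print(op.join(base, 'dMRI/TBSS/JHU-ICBM-labels-1mm.nii.gz'))
    # /opt/bip/data/dMRI/TBSS/JHU-ICBM-labels-1mm.nii.gz
    print(op.join(base, '/dMRI/TBSS/JHU-ICBM-labels-1mm.nii.gz'))
    # /dMRI/TBSS/JHU-ICBM-labels-1mm.nii.gz  -- the base is silently discarded
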
@@ -35,7 +35,7 @@ def run(ctx,
     with redirect_logging(job_name(run, D), outdir=logs_dir):
         MNI_2mm_mask = ctx.get_standard("MNI152_T1_2mm_brain_mask.nii.gz")
-        group_IC = ctx.get_data("/netmats/melodic_IC_" + D + ".nii.gz")
+        group_IC = ctx.get_data("netmats/melodic_IC_" + D + ".nii.gz")

         # Create this directory in case it does not exist. This may
         # happen due to the overwrite function issue in Melodic
@@ -10,6 +10,8 @@
 # pylint: disable=W0613,R1718,C0201,C0206,R0912,W0702,C0301
 #
+import os
+import os.path as op
 import logging

 from pipe_tree import In, Out, Ref
 from bip.pipelines.struct_FS.FS_get_IDPs_fnc import bb_FS_get_IDPs
@@ -20,8 +22,15 @@ log = logging.getLogger(__name__)

 def run(ctx,
         ThalamicNuclei: In,
         rh_entorhinal_exvivo_label: In,
+        FS_stats_dir: In,
+        FreeSurfer_dir: In,
         logs_dir: Ref,
-        FS_IDPs: Out):
+        FS_IDPs: Out,
+        FS_headers_info: Out):

     with redirect_logging(job_name(run), outdir=logs_dir):
-        bb_FS_get_IDPs(ctx)
+        env = dict(os.environ, SUBJECTS_DIR=op.join(os.getcwd(), ctx.subject))
+        bb_FS_get_IDPs(ctx, env, FreeSurfer_dir, FS_IDPs, FS_headers_info,
+                       FS_stats_dir)
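
`dict(os.environ, SUBJECTS_DIR=...)` builds a per-call copy of the environment, so the FreeSurfer tools invoked downstream see the right `SUBJECTS_DIR` without mutating the pipeline's own process environment. A sketch, assuming `run_command` forwards `env` to `subprocess` (the subject folder name is hypothetical):

    import os
    import os.path as op
    import subprocess

    env = dict(os.environ, SUBJECTS_DIR=op.join(os.getcwd(), '1234567'))
    # os.environ itself is untouched; only the child process sees the override
    subprocess.run(['asegstats2table', '--help'], env=env)
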
@@ -31,82 +31,89 @@ def read_file(fileName):
         result.append([x.replace("\n", '') for x in line.split(' ')])
     return result

-def generate_FS_IDP_files(subjects_dir, subject_ID, subject, dataDir,
-                          headersDir, ctx):
-    statsDir = op.join(subjects_dir, subject_ID, 'stats') + os.sep
+def generate_FS_IDP_files(subject_ID, FS_stats_dir, data_dir, ctx, env):

     # TODO: Include a pre-requisite that python2.7 must be available in the system
-    if op.isfile(statsDir + 'aseg.stats'):
+    if op.isfile(op.join(FS_stats_dir, 'aseg.stats')):
         run_command(log, 'asegstats2table ' +
-                    ' -m volume --all-segs --tablefile ' + dataDir +
-                    'aseg_1.txt --subjects ' + subject_ID + ' --skip')
+                    ' -m volume --all-segs --tablefile ' +
+                    op.join(data_dir, 'aseg_1.txt') +
+                    ' --subjects ' + subject_ID +
+                    ' --skip', env=env)
         run_command(log, 'asegstats2table ' +
-                    ' -m mean --all-segs --tablefile ' + dataDir +
-                    'aseg_intensity.txt ' + ' --subjects ' + subject_ID +
-                    ' --skip')
+                    ' -m mean --all-segs --tablefile ' +
+                    op.join(data_dir, 'aseg_intensity.txt') +
+                    ' --subjects ' + subject_ID +
+                    ' --skip', env=env)

-    if op.isfile(statsDir + 'lh.w-g.pct.stats'):
+    if op.isfile(op.join(FS_stats_dir, 'lh.w-g.pct.stats')):
         run_command(log, 'asegstats2table ' +
                     ' -m mean --all-segs --stats=lh.w-g.pct.stats ' +
-                    ' --tablefile ' + dataDir + 'wg_lh_mean.txt ' +
-                    ' --subjects ' + subject_ID + ' --skip')
+                    ' --tablefile ' +
+                    op.join(data_dir, 'wg_lh_mean.txt ') +
+                    ' --subjects ' + subject_ID +
+                    ' --skip', env=env)

-    if op.isfile(statsDir + 'rh.w-g.pct.stats'):
+    if op.isfile(op.join(FS_stats_dir, 'rh.w-g.pct.stats')):
         run_command(log, 'asegstats2table ' +
                     ' -m mean --all-segs --stats=rh.w-g.pct.stats ' +
-                    ' --tablefile ' + dataDir + 'wg_rh_mean.txt ' +
-                    ' --subjects ' + subject_ID + ' --skip')
+                    ' --tablefile ' +
+                    op.join(data_dir, 'wg_rh_mean.txt ') +
+                    ' --subjects ' + subject_ID +
+                    ' --skip', env=env)

     for hemi in ["lh", "rh"]:
         for value in ["volume", "area", "thickness"]:
             for atlas in ["BA_exvivo", "aparc.DKTatlas", "aparc.a2009s",
                           "aparc"]:
-                outFileName = dataDir + atlas + '_' + hemi + '_' + value +\
-                              '.txt'
-                if op.isfile(statsDir + hemi + "." + atlas + '.stats'):
+                file_name = atlas + '_' + hemi + '_' + value + '.txt'
+                out_file_name = op.join(data_dir, file_name)
+                if op.isfile(op.join(FS_stats_dir,
+                                     hemi + "." + atlas + '.stats')):
                     run_command(log, 'aparcstats2table ' +
                                 ' -m ' + value + ' --hemi=' + hemi +
-                                ' --tablefile ' + outFileName +
+                                ' --tablefile ' + out_file_name +
                                 ' --subjects ' + subject_ID +
-                                ' --skip -p ' + atlas)
+                                ' --skip -p ' + atlas, env=env)

     atlas = "aparc.pial"
     value = "area"
     for hemi in ["lh", "rh"]:
-        outFileName = dataDir + atlas + '_' + hemi + '_' + value + '.txt'
-        if op.isfile(statsDir + hemi + '.aparc.pial.stats'):
+        file_name = atlas + '_' + hemi + '_' + value + '.txt'
+        out_file_name = op.join(data_dir, file_name)
+        if op.isfile(op.join(FS_stats_dir, hemi + '.aparc.pial.stats')):
             run_command(log, 'aparcstats2table ' +
                         ' -m ' + value + ' --hemi=' + hemi +
-                        ' --tablefile ' + outFileName +
+                        ' --tablefile ' + out_file_name +
                         ' --subjects ' + subject_ID +
-                        ' --skip -p ' + atlas)
+                        ' --skip -p ' + atlas, env=env)

     with open(ctx.get_data('FS/FS_initial_files.txt'), encoding="utf-8") as f:
-        files_generated = [x.replace('\n', '').split(" ") for x in f.readlines()]
+        files_generated = [x.replace('\n', '').split(" ")
+                           for x in f.readlines()]

     data_dict = {}
     for file_generated in files_generated:
-        if op.isfile(dataDir + file_generated[0] + '.txt'):
-            data = read_file(dataDir + file_generated[0] + '.txt')
+        file_name = op.join(data_dir, file_generated[0] + '.txt')
+        if op.isfile(file_name):
+            data = read_file(file_name)
         else:
             data = read_file(ctx.get_data('FS/FS_data_ex/' +
                                           file_generated[0] + '.txt'))
         data_dict[file_generated[0]] = data

-    data_dict['ID'] = [['ID'], [subject]]
+    data_dict['ID'] = [['ID'], [ctx.subject]]

     return data_dict

 # Quick consistency check
-def check_consistency(data_dict, subjects_dir, ctx):
+def check_consistency(data_dict, subjects_dir, FS_IDPs, ctx):
     for file_generated in data_dict.keys():
         if len(data_dict[file_generated]) > 2:
-            save_data_NaNs(subjects_dir, ctx)
+            save_data_NaNs(subjects_dir, FS_IDPs, ctx)
             raise Exception("Error in " + file_generated +
                             ': File has more than 2 lines')
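
For orientation, every entry in `data_dict` is a two-row table (header row plus value row) as produced by `read_file` from the `*stats2table` output; `check_consistency` simply enforces that shape. A sketch with hypothetical values:

    data_dict = {'aseg_1': [['Measure:volume', 'Left-Thalamus'],
                            ['FreeSurfer',     '7634.2']]}
    headers, values = data_dict['aseg_1']
    assert len(data_dict['aseg_1']) == 2   # 'more than 2 lines' otherwise
    assert len(headers) == len(values)     # 'Inconsistent # of features' otherwise
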
@@ -114,45 +121,49 @@ def check_consistency(data_dict, subjects_dir, ctx):
         len1 = len(data_dict[file_generated][1])
         if len0 != len1:
-            save_data_NaNs(subjects_dir, ctx)
+            save_data_NaNs(subjects_dir, FS_IDPs, ctx)
             raise Exception("Error in " + file_generated +
                             ': Inconsistent # of features')

 def fix_aseg_data(data_dict, subjectDir):

+    s_1 = 'aseg_1'
+    s_global = 'aseg_global'
+
     # Split aseg_1 into aseg_global and aseg_volume
-    data_dict['aseg_global'] = [[], []]
-    data_dict['aseg_global'][0] = [data_dict['aseg_1'][0][0]] + data_dict['aseg_1'][0][46:]
-    data_dict['aseg_global'][1] = [data_dict['aseg_1'][1][0]] + data_dict['aseg_1'][1][46:]
+    data_dict[s_global] = [[], []]
+    data_dict[s_global][0] = [data_dict[s_1][0][0]] + data_dict[s_1][0][46:]
+    data_dict[s_global][1] = [data_dict[s_1][1][0]] + data_dict[s_1][1][46:]

     # Variables not needed
     vars_to_delete = ['CortexVol', 'CerebralWhiteMatterVol',
                       'SupraTentorialVolNotVentVox', 'MaskVol', 'SurfaceHoles']

     ind_to_delete = []
-    for i in range(len(data_dict['aseg_global'][0])):
-        if not data_dict['aseg_global'][0][i] in vars_to_delete:
+    for i in range(len(data_dict[s_global][0])):
+        if not data_dict[s_global][0][i] in vars_to_delete:
             ind_to_delete.append(i)

-    data_dict['aseg_global'][0] = [data_dict['aseg_global'][0][x] for x in ind_to_delete]
-    data_dict['aseg_global'][1] = [data_dict['aseg_global'][1][x] for x in ind_to_delete]
+    data_dict[s_1][0] = [data_dict[s_1][0][x] for x in ind_to_delete]
+    data_dict[s_1][1] = [data_dict[s_1][1][x] for x in ind_to_delete]

     # For some reason, the VentricleChoroidVol is not caught by asegstats2table
     try:
         file_name = op.join(subjectDir, 'stats', 'aseg.stats')
         with open(file_name, 'r', encoding="utf-8") as f:
-            val = [x.split(',')[3].strip() for x in f.readlines() if 'VentricleChoroidVol' in x]
+            val = [x.split(',')[3].strip() for x in f.readlines()
+                   if 'VentricleChoroidVol' in x]
     except Exception:
         val = ["NaN"]

-    data_dict['aseg_global'][0].append('VentricleChoroidVol')
-    data_dict['aseg_global'][1].append(val[0])
+    data_dict[s_global][0].append('VentricleChoroidVol')
+    data_dict[s_global][1].append(val[0])

-    data_dict['aseg_volume'] = [[], []]
-    data_dict['aseg_volume'][0] = data_dict['aseg_1'][0][0:46]
-    data_dict['aseg_volume'][1] = data_dict['aseg_1'][1][0:46]
+    data_dict[s_global] = [[], []]
+    data_dict[s_global][0] = data_dict[s_1][0][0:46]
+    data_dict[s_global][1] = data_dict[s_1][1][0:46]

-    del data_dict['aseg_1']
+    del data_dict[s_1]

     # Remove the WM-hypointensities. No value in any subject
     cols_to_remove = ['Left-WM-hypointensities',
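
The deletion logic above builds a keep-index list and filters both parallel rows with it. The same pattern in isolation (names from FreeSurfer's aseg table, values hypothetical):

    headers = ['BrainSegVol', 'MaskVol', 'CortexVol', 'TotalGrayVol']
    values  = ['1200000',     '1400000', '500000',    '700000']
    vars_to_delete = ['MaskVol', 'CortexVol']

    keep = [i for i, h in enumerate(headers) if h not in vars_to_delete]
    headers = [headers[i] for i in keep]   # ['BrainSegVol', 'TotalGrayVol']
    values  = [values[i]  for i in keep]   # ['1200000', '700000']
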
@@ -194,9 +205,10 @@ def gen_aparc_special(data_dict, subjectDir):
     for elem in struct_data:
         data_dict[elem[0]][0].append(elem[1])
         try:
-            with open(subjectDir + '/stats/' + elem[2], 'r',
+            with open(op.join(subjectDir, 'stats', elem[2]), 'r',
                       encoding="utf-8") as f:
-                v = [x.split(',')[3].strip() for x in f.readlines() if elem[3] in x]
+                v = [x.split(',')[3].strip() for x in f.readlines()
+                     if elem[3] in x]
             data_dict[elem[0]][1].append(v[0])
         except Exception:
             data_dict[elem[0]][1].append('NaN')
@@ -205,7 +217,7 @@ def gen_aparc_special(data_dict, subjectDir):

 def bool_FLAIR(data_dict, subjectDir):
-    if op.isfile(subjectDir + '/mri/FLAIR.mgz'):
+    if op.isfile(op.join(subjectDir, 'mri', 'FLAIR.mgz')):
         data_dict['FLAIR'] = [['Use-T2-FLAIR-for-FreeSurfer'], ['1']]
     else:
         data_dict['FLAIR'] = [['Use-T2-FLAIR-for-FreeSurfer'], ['0']]
@@ -229,7 +241,7 @@ def gen_subsegmentation(data_dict, subjectDir, subject, ctx):
             found = False
             data_dict[struct] = [[], []]
             for fil in struct_data[struct][0]:
-                final_fil = subjectDir + '/FreeSurfer/mri/' + fil
+                final_fil = op.join(subjectDir, 'FreeSurfer', 'mri', fil)
                 if op.isfile(final_fil):
                     with open(final_fil, 'r', encoding="utf-8") as f:
                         for lin in f.readlines():
@@ -315,7 +327,6 @@ def remove_first_feature(data_dict, subject):
     return data_dict

 def fix_headers(data_dict):
-
     # Applying some general replacing rules for the categories
     replace_rules = [['.', '-'],
                      ['BA_exvivo', 'BA-exvivo'],
@@ -338,20 +349,26 @@ def fix_headers(data_dict):
             data_dict[new_key] = data_dict.pop(key)

     # Renaming some special cases
-    structs = [['aseg_global', 'lhSurfaceHoles', 'aseg_lh_number', 'HolesBeforeFixing'],
-               ['aseg_global', 'rhSurfaceHoles', 'aseg_rh_number', 'HolesBeforeFixing'],
-               ['aseg_global', 'BrainSegVol-to-eTIV', 'aseg_global_volume-ratio', 'BrainSegVol-to-eTIV'],
-               ['aseg_global', 'MaskVol-to-eTIV', 'aseg_global_volume-ratio', 'MaskVol-to-eTIV']]
+    s_global = 'aseg_global'
+    structs = [[s_global, 'lhSurfaceHoles',
+                'aseg_lh_number', 'HolesBeforeFixing'],
+               [s_global, 'rhSurfaceHoles',
+                'aseg_rh_number', 'HolesBeforeFixing'],
+               [s_global, 'BrainSegVol-to-eTIV',
+                'aseg_global_volume-ratio', 'BrainSegVol-to-eTIV'],
+               [s_global, 'MaskVol-to-eTIV',
+                'aseg_global_volume-ratio', 'MaskVol-to-eTIV']]

     for struct in structs:
-        index = data_dict[struct[0]][0].index(struct[1])
-        if struct[2] not in data_dict.keys():
-            data_dict[struct[2]] = [struct[3]], [data_dict[struct[0]][1][index]]
-        else:
-            data_dict[struct[2]][0].append(struct[3])
-            data_dict[struct[2]][1].append(data_dict[struct[0]][1][index])
-        del data_dict[struct[0]][0][index]
-        del data_dict[struct[0]][1][index]
+        if struct[1] in data_dict[struct[0]][0]:
+            index = data_dict[struct[0]][0].index(struct[1])
+            if struct[2] not in data_dict.keys():
+                data_dict[struct[2]] = [struct[3]], [data_dict[struct[0]][1][index]]
+            else:
+                data_dict[struct[2]][0].append(struct[3])
+                data_dict[struct[2]][1].append(data_dict[struct[0]][1][index])
+            del data_dict[struct[0]][0][index]
+            del data_dict[struct[0]][1][index]

     for metric in ['volume', 'intensity']:
         old_key = 'aseg_' + metric
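
The new membership guard matters because `list.index` raises `ValueError` when the item is absent (for example, if a header was already removed upstream):

    names = ['lhSurfaceHoles', 'rhSurfaceHoles']

    # names.index('MaskVol-to-eTIV')   # ValueError: 'MaskVol-to-eTIV' is not in list
    if 'MaskVol-to-eTIV' in names:     # the guard added in this commit
        index = names.index('MaskVol-to-eTIV')
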
@@ -382,23 +399,23 @@ def fix_headers(data_dict):
         del data_dict[old_key]

-    for i in range(len(data_dict['aseg_global'][0])):
-        if data_dict['aseg_global'][0][i].startswith('lh'):
-            new_name = data_dict['aseg_global'][0][i].replace('lh', '').\
+    for i in range(len(data_dict[s_global][0])):
+        if data_dict[s_global][0][i].startswith('lh'):
+            new_name = data_dict[s_global][0][i].replace('lh', '').\
                        replace('Vol', '')
             data_dict['aseg_lh_volume'][0].append(new_name)
-            data_dict['aseg_lh_volume'][1].append(data_dict['aseg_global'][1][i])
-        elif data_dict['aseg_global'][0][i].startswith('rh'):
-            new_name = data_dict['aseg_global'][0][i].replace('rh', '').\
+            data_dict['aseg_lh_volume'][1].append(data_dict[s_global][1][i])
+        elif data_dict[s_global][0][i].startswith('rh'):
+            new_name = data_dict[s_global][0][i].replace('rh', '').\
                        replace('Vol', '')
             data_dict['aseg_rh_volume'][0].append(new_name)
-            data_dict['aseg_rh_volume'][1].append(data_dict['aseg_global'][1][i])
+            data_dict['aseg_rh_volume'][1].append(data_dict[s_global][1][i])
         else:
-            new_name = data_dict['aseg_global'][0][i].replace('Vol', '')
+            new_name = data_dict[s_global][0][i].replace('Vol', '')
             data_dict['aseg_global_volume'][0].append(new_name)
-            data_dict['aseg_global_volume'][1].append(data_dict['aseg_global'][1][i])
+            data_dict['aseg_global_volume'][1].append(data_dict[s_global][1][i])

-    del data_dict['aseg_global']
+    del data_dict[s_global]

     # Split ThalamNuclei into Left and Right
     data_dict['ThalamNuclei_lh_volume'] = [[], []]
@@ -447,21 +464,21 @@ def fix_headers(data_dict):
     return data_dict

-def save_data_NaNs(subjects_dir, ctx):
+def save_data_NaNs(subjects_dir, FS_IDPs, ctx):

     with open(ctx.get_data('FS/FS_headers.txt'), encoding="utf-8") as f:
         final_headers = [x.replace('\n', '') for x in f.readlines()]

     num_NaNs = len(final_headers) - 1

-    with open(subjects_dir + '/IDP_files/FS_IDPs.txt', 'w', encoding="utf-8") as f:
+    with open(FS_IDPs, 'w', encoding="utf-8") as f:
         values = ['NaN'] * num_NaNs
         values_str = subjects_dir + " " + " ".join(values)
         f.write(f"{values_str}\n")
     f.close()

-def save_data(data_dict, subjects_dir, ctx):
+def save_data(data_dict, FS_IDPs, ctx):

     with open(ctx.get_data('FS/FS_headers.txt'), encoding="utf-8") as f:
         final_headers = [x.replace('\n', '') for x in f.readlines()]
@@ -479,14 +496,14 @@ def save_data(data_dict, subjects_dir, ctx):
         if x not in temp_headers.keys():
             temp_headers[x] = "NaN"

-    with open(subjects_dir + '/IDP_files/FS_IDPs.txt', 'w', encoding="utf-8") as f:
+    with open(FS_IDPs, 'w', encoding="utf-8") as f:
         values = [temp_headers[x] for x in final_headers]
         values_str = " ".join(values)
         f.write(f"{values_str}\n")
     f.close()

-def save_headers_info(data_dict, subjects_dir, ctx):
+def save_headers_info(data_dict, FS_headers_info, ctx):

     with open(ctx.get_data('FS/FS_final_headers.txt'), encoding="utf-8") as f:
         final_headers = [x.replace('\n', '') for x in f.readlines()]
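
`save_data` aligns whatever was computed against the canonical header list, padding missing IDPs with NaN before writing one space-separated line per subject. The core pattern (header names and values hypothetical):

    final_headers = ['aseg_volume_Left-Thalamus', 'aseg_volume_Right-Thalamus']
    temp_headers  = {'aseg_volume_Left-Thalamus': '7634.2'}   # right side missing

    row = " ".join(temp_headers.get(h, "NaN") for h in final_headers)
    print(row)   # 7634.2 NaN
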
@@ -504,37 +521,37 @@ def save_headers_info(data_dict, subjects_dir, ctx):
         if x not in temp_headers.keys():
             temp_headers[x] = "NaN"

-    with open(subjects_dir + '/IDP_files/FS_headers_info.txt', 'w',
-              encoding="utf-8") as f:
+    with open(FS_headers_info, 'w', encoding="utf-8") as f:
         values = [temp_headers[x] for x in final_headers]
         values_str = " ".join(values)
         f.write(f"{values_str}\n")
     f.close()

-def bb_FS_get_IDPs(ctx):
+def bb_FS_get_IDPs(ctx, env, FreeSurfer_dir, FS_IDPs, FS_headers_info,
+                   FS_stats_dir):

     subject = ctx.subject
-    subject_ID = 'FreeSurfer'
-    subjectDir = op.join(os.getcwd(), ctx.subject)
-    dataDir = subjectDir + '/data/'
-    headersDir = subjectDir + '/headers/'
+    subject_ID = FreeSurfer_dir.split(op.sep)[-1]
+    subjects_dir = op.join(os.getcwd(), ctx.subject)
+    data_dir = op.join(os.getcwd(), FreeSurfer_dir, 'data') + op.sep
+
+    print(data_dir + " " + subjects_dir + " ")

-    if not op.isdir(subjectDir):
+    if not op.isdir(subjects_dir):
         raise Exception("Error: FreeSurfer has not been run on this subject")

-    check_and_create_dir(dataDir)
-    check_and_create_dir(headersDir)
+    check_and_create_dir(data_dir)

-    data_dict = generate_FS_IDP_files(subjectDir, subject_ID, subject, dataDir,
-                                      headersDir, ctx)
-    data_dict = fix_aseg_data(data_dict, subjectDir)
-    data_dict = gen_aparc_special(data_dict, subjectDir)
-    data_dict = gen_subsegmentation(data_dict, subjectDir, subject, ctx)
-    data_dict = bool_FLAIR(data_dict, subjectDir)
+    data_dict = generate_FS_IDP_files(subject_ID, FS_stats_dir, data_dir, ctx,
+                                      env)
+    data_dict = fix_aseg_data(data_dict, subjects_dir)
+    data_dict = gen_aparc_special(data_dict, subjects_dir)
+    data_dict = gen_subsegmentation(data_dict, subjects_dir, subject, ctx)
+    data_dict = bool_FLAIR(data_dict, subjects_dir)
     data_dict = fix_aparc_data(data_dict)
     data_dict = remove_first_feature(data_dict, subject)
     data_dict = fix_headers(data_dict)

-    check_consistency(data_dict, subjectDir, ctx)
-    save_data(data_dict, subjectDir, ctx)
+    check_consistency(data_dict, subjects_dir, FS_IDPs, ctx)
+    save_data(data_dict, FS_IDPs, ctx)
+    save_headers_info(data_dict, FS_headers_info, ctx)
@@ -23,9 +23,10 @@ def run(ctx,
         T1_unbiased: In,
         T2_FLAIR_unbiased: In(optional=True),
         logs_dir: Ref,
-        FreeSurfer_dir: Ref,
         fsaverage: Ref,
-        rh_entorhinal_exvivo_label: Out):
+        rh_entorhinal_exvivo_label: Out,
+        FreeSurfer_dir: Out,
+        FS_stats_dir: Out):

     with redirect_logging(job_name(run), outdir=logs_dir):
@@ -6,20 +6,16 @@
 """The bip.utils package contains a range of miscellaneous utilities. """

-import            contextlib
-import            logging
+import contextlib
+import logging
 import os.path as op
-import            os
-import            time
-import            errno
-import            bip
+import os
+import time
+import errno
+import bip

 log = logging.getLogger(__name__)
 @contextlib.contextmanager
 def lockdir(dirname, delay=5):
     """Lock a directory for exclusive access.
@@ -49,11 +45,10 @@ def lockdir(dirname, delay=5):
     finally:
         log.debug('Relinquishing lock on %s', dirname)
-        os.close( fd)
+        os.close(fd)
         os.unlink(lockfile)
-def get_data(filename):
-    """Return the filename, prefixed with the bip/data/ directory path. """
-    filename.replace('/', os.sep)
-    return op.join(bip.BIPDIR, 'data', filename)
+def get_data(file_name):
+    """Return the file_name, prefixed with the bip/data/ directory path. """
+    file_name = file_name.replace('/', os.sep)
+    return op.join(bip.BIPDIR, 'data', file_name)
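
`lockdir` (unchanged here apart from the `os.close` cleanup) is a context manager, and `get_data` now performs the separator conversion for real. A usage sketch (the shared directory path is hypothetical):

    from bip.utils import get_data, lockdir

    with lockdir('/some/shared/outputdir'):   # blocks until the lock is acquired
        ...                                   # exclusive access inside the block

    print(get_data('config/config.toml'))     # <BIPDIR>/data/config/config.toml
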
@@ -8,6 +8,8 @@
 #
 # pylint: disable=C0103,E0602,C0114,C0115,C0116,R0913,R0914,R0915
 #
+import os.path as op
+import sys
 import argparse
 import os.path
@@ -50,8 +52,10 @@ def main():
                              'If none specified, all will be selected')
     parser.add_argument('-l', dest='B0limit', type=int, default=[100], nargs=1,
                         help='Limit B0 value. (Default 100)')
-    parser.add_argument('-a', dest='bvalFilename', type=str, default='', nargs=1,
-                        help='bval file. (Default: Same basename as the input file)')
+    parser.add_argument('-a', dest='bvalFilename', type=str, default='',
+                        nargs=1,
+                        help='bval file. (Default: Same basename ' +
+                             'as the input file)')

     argsa = parser.parse_args()
@@ -72,7 +76,7 @@ def main():
     outN = os.path.basename(outputFile).split('.')[0]

     if argsa.bvalFilename == '':
-        bvalFilename = baseDir + "/" + baseN + ".bval"
+        bvalFilename = op.join(baseDir, baseN + ".bval")
     else:
         bvalFilename = argsa.bvalFilename[0]
@@ -97,7 +101,7 @@ def main():
         print("The number of B0 must be positive")
         sys.exit()

-    outputIndFile = outDir + '/' + outN + '_indices.txt'
+    outputIndFile = op.join(outDir, outN + '_indices.txt')

     get_b0s(inputFile, bvalFilename, outputFile, outputIndFile,
             desiredNumber, B0limit)
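
A side note on why this script indexes `argsa.bvalFilename[0]` only in the `else` branch: with `nargs=1`, argparse wraps a supplied value in a list but uses `default=''` verbatim:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('-a', dest='bvalFilename', type=str, default='', nargs=1)

    print(p.parse_args([]).bvalFilename)                # ''          (the default, a plain str)
    print(p.parse_args(['-a', 'x.bval']).bvalFilename)  # ['x.bval']  (nargs=1 yields a list)
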