Commit 2ab6e668 authored by Andrei-Claudiu Roibu

Added masking of network outputs and de-meaning of targets

parent b8095ea6
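In short, this commit multiplies the network's predicted volume by the MNI152 brain mask so voxels outside the brain are zeroed, and optionally subtracts a dual-regression subject mean from the rsfMRI targets so the network learns a residual. A minimal conceptual sketch of that idea, assuming PyTorch tensors with broadcastable shapes (illustrative only, not part of the diff; the function name is hypothetical):

import torch

def mask_and_demean(prediction, target, brain_mask, mean_mask, mean_reduction=True):
    # Zero out prediction voxels that fall outside the brain mask
    masked_prediction = torch.mul(prediction, brain_mask)
    # Optionally subtract the subject mean so the target becomes a residual
    if mean_reduction:
        target = torch.sub(target, mean_mask)
    return masked_prediction, target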
......@@ -118,4 +118,5 @@ dmypy.json
.vscode/
datasets/
files.txt
jobscript.sge.sh
\ No newline at end of file
jobscript.sge.sh
*.nii.gz
\ No newline at end of file
......@@ -311,12 +311,18 @@ def evaluate_mapping(mapping_evaluation_parameters):
prediction_output_path = mapping_evaluation_parameters['prediction_output_path']
device = mapping_evaluation_parameters['device']
exit_on_error = mapping_evaluation_parameters['exit_on_error']
brain_mask_path = mapping_evaluation_parameters['brain_mask_path']
mean_mask_path = mapping_evaluation_parameters['mean_mask_path']
mean_reduction = mapping_evaluation_parameters['mean_reduction']
evaluations.evaluate_mapping(trained_model_path,
data_directory,
mapping_data_file,
data_list,
prediction_output_path,
brain_mask_path,
mean_mask_path,
mean_reduction,
device=device,
exit_on_error=exit_on_error)
......
......@@ -14,6 +14,9 @@ train_data_file = "dMRI/autoptx_preproc/tractsNormSummed.nii.gz"
train_output_targets = "fMRI/rfMRI_25.dr/dr_stage2.nii.gz"
validation_data_file = "dMRI/autoptx_preproc/tractsNormSummed.nii.gz"
validation_target_file = "fMRI/rfMRI_25.dr/dr_stage2.nii.gz"
brain_mask_path = "utils/MNI152_T1_2mm_brain_mask.nii.gz"
mean_mask_path = "utils/mean_dr_stage2.nii"
mean_reduction = True
[TRAINING]
training_batch_size = 4
......
......@@ -4,5 +4,8 @@ data_directory = "/well/win-biobank/projects/imaging/data/data3/subjectsAll/"
mapping_data_file = "dMRI/autoptx_preproc/tractsNormSummed.nii.gz"
data_list = "datasets/test.txt"
prediction_output_path = "network_predictions"
brain_mask_path = "utils/MNI152_T1_2mm_brain_mask.nii.gz"
mean_mask_path = "utils/mean_dr_stage2.nii"
mean_reduction = True
device = 0
exit_on_error = True
\ No newline at end of file
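These settings feed the mapping_evaluation_parameters dictionary used in the run script above. A hedged sketch of how an INI-style file with Python-literal values like this could be loaded; the project's actual settings loader may differ, and the helper name below is hypothetical:

import ast
import configparser

def load_settings(settings_file):
    # Parse every section and evaluate each value as a Python literal
    # (e.g. True -> bool, 0 -> int, quoted strings -> str)
    parser = configparser.ConfigParser()
    parser.read(settings_file)
    settings = {}
    for section in parser.sections():
        for key, value in parser[section].items():
            settings[key] = ast.literal_eval(value)
    return settings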
......@@ -206,6 +206,9 @@ def evaluate_mapping(trained_model_path,
mapping_data_file,
data_list,
prediction_output_path,
brain_mask_path,
mean_mask_path,
mean_reduction,
device=0,
mode='evaluate',
exit_on_error=False):
......@@ -219,6 +222,9 @@ def evaluate_mapping(trained_model_path,
mapping_data_file (str): Path to the input file
data_list (str): Path to a .txt file containing the input files for consideration
prediction_output_path (str): Output prediction path
brain_mask_path (str): Path to the MNI brain mask file
mean_mask_path (str): Path to the dualreg subject mean mask
mean_reduction (bool): Flag indicating if the targets should be de-meaned using the mean_mask_path
device (str/int): Device type used for training (int - GPU id, str- CPU)
mode (str): Current run mode or phase
exit_on_error (bool): Flag that triggers the raising of an exception
......@@ -269,8 +275,8 @@ def evaluate_mapping(trained_model_path,
try:
print("Mapping Volume {}/{}".format(volume_index+1, len(file_paths)))
# Generate volume & header
_, predicted_volume, header, xform = _generate_volume_map(
file_path, model, device, cuda_available)
predicted_complete_volume, predicted_volume, header, xform = _generate_volume_map(
file_path, model, device, cuda_available, brain_mask_path, mean_mask_path, mean_reduction)
# Generate New Header Affine
......@@ -288,6 +294,17 @@ def evaluate_mapping(trained_model_path,
output_nifti_image.save(output_nifti_path)
if mean_reduction == True:
    output_complete_nifti_image = Image(
        predicted_complete_volume, header=header, xform=xform)
    output_complete_nifti_path = output_nifti_path + '_complete'
    if '.nii' not in output_complete_nifti_path:
        output_complete_nifti_path += '.nii.gz'
    output_complete_nifti_image.save(output_complete_nifti_path)
log.info("Processed: " + volumes_to_be_used[volume_index] + " " + str(
volume_index + 1) + " out of " + str(len(volumes_to_be_used)))
......@@ -306,7 +323,7 @@ def evaluate_mapping(trained_model_path,
log.info("rsfMRI Generation Complete")
def _generate_volume_map(file_path, model, device, cuda_available):
def _generate_volume_map(file_path, model, device, cuda_available, brain_mask_path, mean_mask_path, mean_reduction=False):
"""rsfMRI Volume Generator
This function uses the trained model to generate a new volume
......@@ -316,6 +333,9 @@ def _generate_volume_map(file_path, model, device, cuda_available):
model (class): BrainMapper model class
device (str/int): Device type used for training (int - GPU id, str- CPU)
cuda_available (bool): Flag indicating if a cuda-enabled GPU is present
brain_mask_path (str): Path to the MNI brain mask file
mean_mask_path (str): Path to the dualreg subject mean mask
mean_reduction (bool): Flag indicating if the targets should be de-meaned using the mean_mask_path
Returns
predicted_volume (np.array): Array containing the information regarding the generated volume
......@@ -332,23 +352,32 @@ def _generate_volume_map(file_path, model, device, cuda_available):
volume = torch.tensor(volume).type(torch.FloatTensor)
output_volume = []
MNI152_T1_2mm_brain_mask = torch.from_numpy(Image(brain_mask_path).data)
MNI152_T1_2mm_brain_mask = torch.from_numpy(Image('utils/MNI152_T1_2mm_brain_mask.nii.gz').data)
if mean_reduction == True:
    mean_mask = torch.from_numpy(Image(mean_mask_path).data)
if cuda_available and (type(device) == int):
    volume = volume.cuda(device)
    MNI152_T1_2mm_brain_mask = MNI152_T1_2mm_brain_mask.cuda(device)
    if mean_reduction == True:
        mean_mask = mean_mask.cuda(device)
output = model(volume)
output = torch.mul(output, MNI152_T1_2mm_brain_mask)
predicted_volume = output
if mean_reduction == True:
    predicted_complete_volume = torch.add(output, mean_mask)
    predicted_complete_volume = (predicted_complete_volume.cpu().numpy()).astype('float32')
    predicted_complete_volume = np.squeeze(predicted_complete_volume)
else:
    predicted_complete_volume = None
predicted_volume = output
predicted_volume = (predicted_volume.cpu().numpy()).astype('float32')
predicted_volume = np.squeeze(predicted_volume)
return output_volume, predicted_volume, header, xform
return predicted_complete_volume, predicted_volume, header, xform
# DEPRECATED FUNCTIONS
......
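The evaluation path above mirrors the training-time de-meaning: the network predicts the residual around the dual-regression subject mean, and adding that mean back yields the '_complete' volume saved alongside the residual prediction. A minimal round-trip sketch, illustrative only, with hypothetical variable names:

import numpy as np

def demean_and_restore(target, residual_prediction, subject_mean):
    # Training target: the subject's rsfMRI map with the subject mean removed
    demeaned_target = np.subtract(target, subject_mean)
    # Inference: adding the mean back to the predicted residual gives the complete map
    complete_prediction = np.add(residual_prediction, subject_mean)
    return demeaned_target, complete_prediction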
......@@ -49,7 +49,6 @@ def directory_reader(folder_location, subject_number=None, write_txt=False):
subject_number = len(os.listdir(os.path.join(
os.path.expanduser("~"), folder_location)))
for directory in os.listdir(folder_location):
if number_of_subjects < subject_number:
if os.path.isdir(os.path.join(folder_location, directory)) and os.path.exists(os.path.join(folder_location, directory, "dMRI/autoptx_preproc/")) and os.path.exists(os.path.join(folder_location, directory, "fMRI/rfMRI_25.dr/")):
......@@ -130,14 +129,14 @@ def data_test_train_validation_split(data_folder_name, test_percentage, subject_
train_data, test_size=int(len(test)), random_state=42, shuffle=True)
if os.path.exists(os.path.join(data_folder_name, 'train.txt')):
os.remove(os.path.join(data_folder_name,'train.txt'))
os.remove(os.path.join(data_folder_name, 'train.txt'))
np.savetxt(os.path.join(data_folder_name,
'train.txt'), train, fmt='%s')
if os.path.exists(os.path.join(data_folder_name, 'validation.txt')):
os.remove(os.path.join(data_folder_name,'validation.txt'))
os.remove(os.path.join(data_folder_name, 'validation.txt'))
np.savetxt(os.path.join(data_folder_name, 'validation.txt'),
validation, fmt='%s')
validation, fmt='%s')
else:
k_fold = KFold(n_splits=K_fold)
......@@ -145,13 +144,15 @@ def data_test_train_validation_split(data_folder_name, test_percentage, subject_
for train_index, validation_index in k_fold.split(train_data):
train, validation = train_data[train_index], train_data[validation_index]
if os.path.exists(os.path.join(data_folder_name, 'train' + str(k+1)+ '.txt')):
os.remove(os.path.join(data_folder_name,'train' + str(k+1)+ '.txt'))
if os.path.exists(os.path.join(data_folder_name, 'train' + str(k+1) + '.txt')):
os.remove(os.path.join(data_folder_name,
'train' + str(k+1) + '.txt'))
np.savetxt(os.path.join(data_folder_name, 'train' +
str(k+1)+'.txt'), train, fmt='%s')
if os.path.exists(os.path.join(data_folder_name, 'validation' + str(k+1)+ '.txt')):
os.remove(os.path.join(data_folder_name,'validation' + str(k+1)+ '.txt'))
if os.path.exists(os.path.join(data_folder_name, 'validation' + str(k+1) + '.txt')):
os.remove(os.path.join(data_folder_name,
'validation' + str(k+1) + '.txt'))
np.savetxt(os.path.join(data_folder_name, 'validation' +
str(k+1)+'.txt'), validation, fmt='%s')
......@@ -187,6 +188,8 @@ class DataMapper(data.Dataset):
data_directory (str): Directory where the various subjects are stored.
data_file (str): Intenal path for each subject to the relevant normalized summed dMRI tracts
output_targets (str): Internal path for each subject to the relevant rsfMRI data
mean_mask_path (str): Path to the dualreg subject mean mask
mean_reduction (bool): Flag indicating if the targets should be de-meaned using the mean_mask_path
Returns:
X_volume (torch.tensor): Tensor representation of the input data
......@@ -195,7 +198,7 @@ class DataMapper(data.Dataset):
"""
def __init__(self, filename, data_directory, data_file, output_targets):
def __init__(self, filename, data_directory, data_file, output_targets, mean_mask_path, mean_reduction=False):
# Initialize everything, and only store in memory the text data file.
# Memory usage limited by only storing the text string information, not the actual volumes.
# TODO: Currently, the timepoint in the fMRI data (y_volume) is hardcoded, only loading in the RSN. This needs to be updated in later iterations.
......@@ -205,6 +208,8 @@ class DataMapper(data.Dataset):
self.output_targets = output_targets
self.sample_pairs = []
self._get_datasets()
self.mean_mask_path = mean_mask_path
self.mean_reduction = mean_reduction
def __len__(self):
return len(self.sample_pairs)
......@@ -214,7 +219,12 @@ class DataMapper(data.Dataset):
X_path, y_path = self.sample_pairs[index]
X_volume = torch.from_numpy(self.resample(X_path))
y_volume = torch.from_numpy(self.convert_to_numpy(y_path)[:, :, :, 0])
if self.mean_reduction == True:
    y_volume = torch.from_numpy(self.subtract_mean(
        y_path, self.mean_mask_path)[:, :, :, 0])
else:
    y_volume = torch.from_numpy(
        self.convert_to_numpy(y_path)[:, :, :, 0])
return X_volume, y_volume
......@@ -289,6 +299,23 @@ class DataMapper(data.Dataset):
volume_numpy = self.read_data_files(path).data
return volume_numpy
def subtract_mean(self, path, mean_mask_path):
"""Mean Mask Subtraction
Helper function which subtracts the dualreg subject mean mask from the target volume
Args:
path (str): Path to the target volume to be de-meaned
mean_mask_path (str): Path to the dualreg subject mean mask
Returns:
subtracted_volume (np.array): Numpy array representation of the subtracted volume data
"""
dualreg_subject_mean = self.convert_to_numpy(mean_mask_path)
volume = self.convert_to_numpy(path)
subtracted_volume = np.subtract(volume, dualreg_subject_mean)
return subtracted_volume
def get_datasets(data_parameters):
"""Data Loader Function.
......@@ -320,11 +347,14 @@ def get_datasets(data_parameters):
validation_data_file = data_parameters['validation_data_file']
validation_output_targets = data_parameters['validation_target_file']
mean_mask_path = data_parameters['mean_mask_path']
mean_reduction = data_parameters['mean_reduction']
return (
DataMapper(train_filename, data_directory,
train_data_file, train_output_targets),
train_data_file, train_output_targets, mean_mask_path, mean_reduction),
DataMapper(validation_filename, data_directory,
validation_data_file, validation_output_targets)
validation_data_file, validation_output_targets, mean_mask_path, mean_reduction)
)
......
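For reference, a usage sketch of the updated DataMapper signature, using the paths and batch size shown in the configuration files above; the train-list path and the import path are assumptions, and the snippet is illustrative rather than part of the commit:

from torch.utils.data import DataLoader
from utils.data_utils import DataMapper  # module path assumed; DataMapper is defined in the diff above

train_dataset = DataMapper(filename="datasets/train.txt",  # placeholder train list
                           data_directory="/well/win-biobank/projects/imaging/data/data3/subjectsAll/",
                           data_file="dMRI/autoptx_preproc/tractsNormSummed.nii.gz",
                           output_targets="fMRI/rfMRI_25.dr/dr_stage2.nii.gz",
                           mean_mask_path="utils/mean_dr_stage2.nii",
                           mean_reduction=True)

train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)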