Commit aeb4decd authored by Andrei Roibu's avatar Andrei Roibu

eliminated deprecated functions, updated branch for AE

parent bd5515dc
@@ -120,4 +120,6 @@ datasets/
files.txt
jobscript.sge.sh
*.nii.gz
-stuff/
\ No newline at end of file
+stuff/
+test/*
+.DS_Store
@@ -433,6 +433,9 @@ if __name__ == '__main__':
                              network_parameters, misc_parameters)
         logging.basicConfig(filename='evaluate-mapping-error.log')
         evaluate_mapping(mapping_evaluation_parameters)
+    elif arguments.mode == 'prepare-data':
+        print('Ensure you have updated the settings.ini file accordingly! This call does nothing but pass after data was shuffled!')
+        pass
     else:
         raise ValueError(
-            'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, train-and-evaluate-mapping, clear-experiments and clear-everything')
+            'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, train-and-evaluate-mapping, prepare-data, clear-experiments and clear-everything')
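For orientation, the dispatch above implies a command-line entry point along these lines. This is a minimal sketch, not the file's actual parser: the --mode flag name and the argparse setup are assumptions inferred from arguments.mode and the supported-modes list in the error message.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', required=True,
                        help='One of: train, evaluate-score, evaluate-mapping, '
                             'train-and-evaluate-mapping, prepare-data, '
                             'clear-experiments, clear-everything')
    arguments = parser.parse_args()  # arguments.mode then drives the if/elif chain above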
 [DATA]
 data_folder_name = "datasets"
-use_data_file = False
+use_data_file = True
 data_directory = "/well/win-biobank/projects/imaging/data/data3/subjectsAll/"
 data_file = "/well/win-biobank/projects/imaging/data/data3/subjectsAll/subj_22k.txt"
 k_fold = None
-data_split_flag = False
+data_split_flag = True
 test_percentage = 5
-subject_number = 600
+subject_number = 12000
 train_list = "datasets/train.txt"
 validation_list = "datasets/validation.txt"
 test_list = "datasets/test.txt"
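Since settings.ini is plain INI syntax, the [DATA] block can be read with Python's standard configparser. A minimal sketch under that assumption (the project may well use its own settings loader; note that string values in this file carry literal double quotes that need stripping):

    import configparser

    config = configparser.ConfigParser()
    config.read('settings.ini')
    data = config['DATA']

    use_data_file = data.getboolean('use_data_file')  # True
    subject_number = data.getint('subject_number')    # 12000
    train_list = data['train_list'].strip('"')        # datasets/train.txt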
@@ -23,8 +23,8 @@ mean_reduction = True
 experiment_name = "CU3D17-3"
 pre_trained_path = "saved_models/CU3D17-3.pth.tar"
 final_model_output_file = "CU3D17-3.pth.tar"
-training_batch_size = 3
-validation_batch_size = 3
+training_batch_size = 5
+validation_batch_size = 5
 use_pre_trained = False
 learning_rate = 1e-1
 optimizer_beta = (0.9, 0.999)
......@@ -40,19 +40,15 @@ use_last_checkpoint = False
kernel_heigth = 3
kernel_width = 3
kernel_depth = 3
; kernel_classification = 1
kernel_classification = 7
input_channels = 1
output_channels = 64
convolution_stride = 1
dropout = 0.2
; pool_kernel_size = 2
pool_kernel_size = 3
pool_stride = 2
up_mode = "upconv"
number_of_classes = 1
; ---> parameters for the ResNet CGAN
pool_kernel_size = 3
kernel_classification = 7
[MISC]
save_model_directory = "saved_models"
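Read together, these [NETWORK] values describe a 3D convolutional stage. Below is a hypothetical sketch of how such settings could be wired into PyTorch, purely for illustration; the repository's real architecture is defined elsewhere and is not shown in this diff.

    import torch
    import torch.nn as nn

    block = nn.Sequential(
        nn.Conv3d(in_channels=1, out_channels=64,    # input_channels, output_channels
                  kernel_size=(3, 3, 3), stride=1),  # kernel_heigth/width/depth, convolution_stride
        nn.Dropout3d(p=0.2),                         # dropout
        nn.MaxPool3d(kernel_size=3, stride=2),       # pool_kernel_size, pool_stride
    )
    x = torch.randn(1, 1, 32, 32, 32)  # (batch, channels, depth, height, width)
    print(block(x).shape)              # torch.Size([1, 64, 14, 14, 14])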
@@ -300,6 +300,9 @@ class Solver():
         print('Final Model Saved in: {}'.format(model_output_path))
         print('****************************************************************')
+        if self.start_epoch >= self.number_epochs+1:
+            validation_loss = None
+            return validation_loss

     def save_checkpoint(self, state, filename):
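One consequence of the added guard: when a run resumes at or past the final epoch, the surrounding training method now returns None instead of a validation loss, so callers should be prepared for that. A hedged sketch (the method name and the solver variable are assumptions, inferred from the snippet above rather than shown in this diff):

    validation_loss = solver.train()  # hypothetical call into the Solver above
    if validation_loss is None:
        print('Checkpoint already covers all epochs; nothing left to train.')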
@@ -156,112 +156,3 @@ class LogWriter():
        """
        self.log_writer['train'].add_graph(model)

    # DEPRECATED / UNDEBUGGED FUNCTIONS

    def plot_dice_score(self, dice_score, phase, plot_name, title, epochs=None):
        """Function plotting dice score for multiple epochs

        This function plots the dice score for each epoch.

        Args:
            dice_score (torch.tensor): Dice score value for each class
            phase (str): Current run mode or phase
            plot_name (str): Caption name for later reference
            title (str): Plot title
            epochs (int): Current epoch value
        """
        figure = matplotlib.figure.Figure()  # Might add some arguments here later
        ax = figure.add_subplot(1, 1, 1)
        ax.set_xlabel(title)
        ax.xaxis.set_label_position('top')
        ax.bar(np.arange(self.number_of_classes), dice_score)
        ax.set_xticks(np.arange(self.number_of_classes))
        if self.labels is None:
            pass
        else:
            ax.set_xticklabels(self.labels)
        ax.xaxis.tick_bottom()
        if epochs:
            self.log_writer[phase].add_figure(
                plot_name + '/' + phase, figure, global_step=epochs)
        else:
            self.log_writer[phase].add_figure(plot_name + '/' + phase, figure)

    def dice_score_per_epoch(self, phase, outputs, correct_labels, epoch):
        """Function calculating dice score for each epoch

        This function computes the dice score for each epoch.

        Args:
            phase (str): Current run mode or phase
            outputs (torch.tensor): Tensor of all the network outputs (Y-hat)
            correct_labels (torch.tensor): Output ground-truth labelled data (Y)
            epoch (int): Current epoch value
        """
        print("Dice Score is being calculated...", end='', flush=True)
        dice_score = evaluation.dice_score_calculator(
            outputs, correct_labels, self.number_of_classes)
        mean_dice_score = torch.mean(dice_score)
        self.plot_dice_score(
            dice_score, phase, plot_name='dice_score_per_epoch', title='Dice Score', epochs=epoch)
        print("Dice Score calculated successfully")
        return mean_dice_score.item()

    def sample_image_per_epoch(self, prediction, ground_truth, phase, epoch):
        """Function plotting mirrored images

        This function plots a predicted and a ground truth image side-by-side.

        Args:
            prediction (torch.tensor): Predicted image after passing through the network
            ground_truth (torch.tensor): Labelled ground truth image
            phase (str): Current run mode or phase
            epoch (int): Current epoch value
        """
        print("Sample Image is being loaded...", end='', flush=True)
        figure, ax = plt.subplots(nrows=len(prediction), ncols=2)
        for i in range(len(prediction)):
            ax[i][0].imshow(prediction[i])
            ax[i][0].set_title("Predicted Image")
            ax[i][0].axis('off')
            ax[i][1].imshow(ground_truth[i])
            ax[i][1].set_title('Ground Truth Image')
            ax[i][1].axis('off')
        figure.set_tight_layout(True)  # set_tight_layout requires a tight argument
        self.log_writer[phase].add_figure(
            'sample_prediction/' + phase, figure, epoch)
        print("Sample Image successfully loaded!")

    def labels_generator(self, labels):
        """Label Generator Function

        This function processes an input array of labels.

        Args:
            labels (arr): Vector/Array of labels (if applicable)

        Returns:
            label_classes (list): List of processed labels
        """
        label_classes = []
        for label in labels:
            label_class = re.sub(
                r'([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))', r'\1 ', label)
            # Wrap the whole string; iterating over it would wrap single characters.
            label_class = '\n'.join(wrap(label_class, 40))
            label_classes.append(label_class)
        return label_classes
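The regular expression in labels_generator inserts a space before each new capitalized word, turning CamelCase labels into readable captions. A small self-contained demonstration (the example label is hypothetical):

    import re
    from textwrap import wrap

    label = 'CorpusCallosumBody'
    spaced = re.sub(r'([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))', r'\1 ', label)
    print(spaced)                       # Corpus Callosum Body
    print('\n'.join(wrap(spaced, 40)))  # folded into 40-character lines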
@@ -609,262 +609,4 @@ def load_and_preprocess_targets(target_path, mean_mask_path):
target = Image(target_path[0]).data[:,:,:,0]
target_demeaned = np.subtract(target, Image(mean_mask_path).data[:,:,:,0])
return target, target_demeaned
# Deprecated Functions & Classes & Methods:

def set_orientation(volume, label_map, orientation):
    """Load Data Orientation

    This function modifies the orientation of the input and output data depending on the required orientation.

    Args:
        volume (np.array): Array of training image data of data type dtype.
        label_map (np.array): Array of labelled image data of data type dtype.
        orientation (str): String detailing the current view (COR, SAG, AXL)

    Returns:
        volume (np.array): Array of training image data of data type dtype.
        label_map (np.array): Array of labelled image data of data type dtype.

    Raises:
        ValueError: Orientation value is invalid. It must be either >>coronal<<, >>axial<< or >>sagital<<
    """
    # TODO: will need to check if these alignments correspond with our data.
    # These alignments work for ADNI
    if orientation == "sagittal":
        return volume, label_map  # This is assumed to be the default orientation
    elif orientation == "axial":
        return volume.transpose((1, 2, 0)), label_map.transpose((1, 2, 0))
    elif orientation == "coronal":
        return volume.transpose((2, 0, 1)), label_map.transpose((2, 0, 1))
    else:
        raise ValueError(
            "Orientation value is invalid. It must be either >>coronal<<, >>axial<< or >>sagital<< ")
def tract_sum_generator(folder_path):
    """Sums the tracts of different dMRI files

    THIS FUNCTION IS NOW DEPRECATED: SummedTractMaps generated remotely

    When performing subject-specific probabilistic diffusion tractography using standard-space protocols, 27 tracts are created.
    This function loops through all the tracts, sums them and returns the summed tract map.
    This function also outputs the summed tract map as a Nifti (.nii.gz) file.

    Args:
        folder_path (str): A string containing the address of the required directory.
    """
    tractMapName = 'tracts/tractsNorm.nii.gz'
    subDirectoryList = directory_reader(folder_path)
    viableSubDirectories = len(subDirectoryList)
    counter = 0

    if not os.path.exists('/well/win/users/hsv459/functionmapper/datasets/dMRI'):
        if not os.path.exists('/well/win/users/hsv459/functionmapper/datasets'):
            os.mkdir('/well/win/users/hsv459/functionmapper/datasets')
        os.mkdir('/well/win/users/hsv459/functionmapper/datasets/dMRI')

    for subDirectory in subDirectoryList:
        tractedMapsPath = os.path.join(folder_path, str(
            subDirectory), 'dMRI/autoptx_preproc/tracts/')
        sum_flag = False  # This is a flag showing us if this is the first tract to be summed
        print("Summing the tract number: {}/{}".format(counter, viableSubDirectories))
        for tract in os.listdir(tractedMapsPath):
            if os.path.isdir(os.path.join(tractedMapsPath, tract)):
                tractedMapPath = os.path.join(
                    tractedMapsPath, tract, tractMapName)
                tractedMapImg = nib.load(tractedMapPath)
                tractedMap = tractedMapImg.get_fdata()
                # The affine array stores the relationship between voxel coordinates in the image data array and coordinates in the reference space.
                tractedMapAffine = tractedMapImg.affine
                if sum_flag == False:
                    tractedMapSum = np.copy(tractedMap)
                    sum_flag = True  # Only copy the first tract; accumulate afterwards
                else:
                    # np.sum(a, b) would treat b as an axis argument; element-wise addition is intended here.
                    tractedMapSum = np.add(tractedMapSum, tractedMap)
        tractedMapSumPath = '/well/win/users/hsv459/functionmapper/datasets/dMRI'
        tractsSumName = str(subDirectory) + ".nii.gz"
        tractedMapSumImg = nib.Nifti1Image(tractedMapSum, tractedMapAffine)
        nib.save(tractedMapSumImg, os.path.join(
            tractedMapSumPath, tractsSumName))
        counter += 1

    return None
class DataMapperHDF5(data.Dataset):
    """Data Mapper Class.

    THIS CLASS IS NOW DEPRECATED!

    This class represents a generic parent class for mapping between keys and data samples.
    The class represents a subclass/child class of data.Dataset, inheriting its functionality.
    This class is composed of an __init__ constructor, a __getitem__(), supporting fetching a data sample for a given key, and __len__(), which returns the size of the dataset.

    Args:
        X (HDF5 datafile): hierarchically organized input data
        y (HDF5 datafile): hierarchically organized output data

    Returns:
        input_data (torch.tensor): Tensor representation of the input data
        label_data (torch.tensor): Tensor representation of the output data
        int: length of the output
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __getitem__(self, index):
        input_data = torch.from_numpy(self.X[index])
        label_data = torch.from_numpy(self.y[index])
        return input_data, label_data

    def __len__(self):
        return len(self.y)
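# Hypothetical usage sketch (not part of the original file): as a data.Dataset
# subclass, DataMapperHDF5 would typically be consumed through a PyTorch DataLoader:
#   loader = data.DataLoader(DataMapperHDF5(X, y), batch_size=5, shuffle=True)
#   for input_batch, label_batch in loader:
#       ...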
def get_datasetsHDF5(data_parameters):
    """Data Loader Function.

    THIS FUNCTION IS NOW DEPRECATED: Loader function rewritten.

    This function loads the various data files and returns the relevant mapped datasets.

    Args:
        data_parameters (dict): Dictionary containing relevant information for the datafiles.
        data_parameters = {
            data_directory: 'path/to/directory'
            train_data_file: 'training_data'
            train_output_targets: 'training_targets'
            train_list = 'train.txt'
            validation_list = 'validation.txt'
            test_list = 'test.txt'
            test_data_file: 'testing_data'
            test_target_file: 'testing_targets'
        }

    Returns:
        tuple: the relevant train and test datasets
    """
    training_data = h5py.File(os.path.join(
        data_parameters['data_directory'], data_parameters['training_data']), 'r')
    testing_data = h5py.File(os.path.join(
        data_parameters['data_directory'], data_parameters['testing_data']), 'r')
    training_labels = h5py.File(os.path.join(
        data_parameters['data_directory'], data_parameters['training_targets']), 'r')
    testing_labels = h5py.File(os.path.join(
        data_parameters['data_directory'], data_parameters['testing_targets']), 'r')

    return (
        DataMapperHDF5(training_data['data'][()],
                       training_labels['label'][()]),
        DataMapperHDF5(testing_data['data'][()], testing_labels['label'][()])
    )
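# Hypothetical layout sketch (not part of the original file): the loader above
# assumes each HDF5 file exposes a single dataset named 'data' (inputs) or
# 'label' (targets), e.g. files built along these lines:
#   with h5py.File('training_data', 'w') as datafile:
#       datafile.create_dataset('data', data=input_array)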
def load_and_preprocess_evaluation2D(file_path, orientation, min_max=True):
    """Load & Preprocessing before evaluation

    This function loads a NIfTI file and returns its volume and header information.

    Args:
        file_path (str): Path to the desired file
        orientation (str): String detailing the current view (COR, SAG, AXL)
        min_max (bool): Flag for inducing min-max normalization of the volume

    Returns:
        volume (np.array): Array of training image data of data type dtype.
        header (class): 'nibabel.nifti1.Nifti1Header' class object, containing image metadata

    Raises:
        ValueError: "Orientation value is invalid. It must be either >>coronal<<, >>axial<< or >>sagital<< "
    """
    nifty_volume = nib.load(file_path[0])
    volume = nifty_volume.get_fdata()
    header = nifty_volume.header

    if min_max:
        volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
    else:
        volume = np.round(volume)

    if orientation == "sagittal":
        pass  # This is assumed to be the default orientation
    elif orientation == "axial":
        volume = volume.transpose((1, 2, 0))
    elif orientation == "coronal":
        volume = volume.transpose((2, 0, 1))
    else:
        raise ValueError(
            "Orientation value is invalid. It must be either >>coronal<<, >>axial<< or >>sagital<< ")

    # Returning inside the orientation branches made the final return unreachable
    # and dropped the header; return both values once, as documented.
    return volume, header
def load_and_preprocess(file_paths, orientation):
    """Load & Preprocess

    This function is composed of two other function calls: one that calls a function loading the data, and another which preprocesses the data to the required format.
    # TODO: Need to check if any more preprocessing would be required besides summing the tracts!

    Args:
        file_paths (list): List containing the input data and target labelled output data
        orientation (str): String detailing the current view (COR, SAG, AXL)

    Returns:
        volume (np.array): Array of training image data of data type dtype.
        label_map (np.array): Array of labelled image data of data type dtype.
        header (class): 'nibabel.nifti1.Nifti1Header' class object, containing image metadata
    """
    volume, label_map, header = load(file_paths, orientation)

    return volume, label_map, header
def load(file_path, orientation):
    """Load Data Function

    This function loads the required data files and extracts relevant information from them.

    Args:
        file_path (list): List containing the input data and target labelled output data
        orientation (str): String detailing the current view (COR, SAG, AXL)

    Returns:
        volume (np.array): Array of training image data of data type dtype.
        label_map (np.array): Array of labelled image data of data type dtype.
        header (class): 'nibabel.nifti1.Nifti1Header' class object, containing image metadata
    """
    nifty_volume, label_map = nib.load(file_path[0]), nib.load(file_path[1])
    volume, label_map = nifty_volume.get_fdata(), label_map.get_fdata()  # was ged_fdata(), a typo
    # Do we need min-max normalization here? Will need to check when debugging and testing
    volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
    volume, label_map = set_orientation(volume, label_map, orientation)

    return volume, label_map, nifty_volume.header