Commit 98ccda23 authored by Andrei-Claudiu Roibu
Browse files

bug fixes - fixed settings error and refactored load_data variable names

parent b4d72d8c
......@@ -51,7 +51,7 @@ torch.set_default_tensor_type(torch.FloatTensor)
def load_data(data_parameters):
    """Dataset Loader

    This function loads the training and validation datasets.

    TODO: Will need to define if all the training data is loaded as bulk or individually!

    Args:
        data_parameters (dict): Settings describing the data split and file
            locations; passed straight through to get_datasets.

    Returns:
        train_data (dataset object): Pytorch map-style dataset object, mapping indices to training data samples.
        validation_data (dataset object): Pytorch map-style dataset object, mapping indices to validation data samples.
    """

    print("Data is loading...")

    train_data, validation_data = get_datasets(data_parameters)

    print("Data has loaded!")
    print("Training dataset size is {}".format(len(train_data)))
    print("Validation dataset size is {}".format(len(validation_data)))

    return train_data, validation_data
def train(data_parameters, training_parameters, network_parameters, misc_parameters):
......@@ -133,7 +133,7 @@ def train(data_parameters, training_parameters, network_parameters, misc_paramet
)
if training_parameters['use_pre_trained']:
BrainMapperModel = torch.load(traikning_parameters['pre_trained_path'])
BrainMapperModel = torch.load(training_parameters['pre_trained_path'])
else:
BrainMapperModel = BrainMapperUNet(network_parameters)
......@@ -201,7 +201,7 @@ def evaluate_score(training_parameters, network_parameters, misc_parameters, eva
}
"""
################# TODO - NEED TO UPDATE THE DATA FUNCTIONS!
# TODO - NEED TO UPDATE THE DATA FUNCTIONS!
logWriter = LogWriter(number_of_classes=network_parameters['number_of_classes'],
logs_directory=misc_parameters['logs_directory'],
......@@ -352,37 +352,39 @@ if __name__ == '__main__':
data_shuffling_flag = data_parameters['data_split_flag']
if data_shuffling_flag == True:
# Here we shuffle the data!
data_test_train_validation_split(data_parameters['data_directory'], data_parameters['train_percentage'], data_parameters['validation_percentage'])
update_shuffling_flag('settings.ini')
# TODO: This might also be a very good point to add cross-validation later
else:
if arguments.mode == 'train':
train(data_parameters, training_parameters,
network_parameters, misc_parameters)
elif arguments.mode == 'evaluate-score':
evaluate_score(training_parameters,
network_parameters, misc_parameters, evaluation_parameters)
elif arguments.mode == 'evaluate-mapping':
logging.basicConfig(filename='evaluate-mapping-error.log')
if arguments.settings_path is not None:
settings_evaluation = Settings(arguments.settings_path)
else:
settings_evaluation = Settings('settings_evaluation.ini')
mapping_evaluation_parameters = settings_evaluation['MAPPING']
evaluate_mapping(mapping_evaluation_parameters)
elif arguments.mode == 'clear-experiments':
shutil.rmtree(os.path.join(
misc_parameters['experiments_directory'], training_parameters['experiment_name']))
shutil.rmtree(os.path.join(
misc_parameters['logs_directory'], training_parameters['experiment_name']))
print('Cleared the current experiments and logs directory successfully!')
elif arguments.mode == 'clear-everything':
delete_files(misc_parameters['experiments_directory'])
delete_files(misc_parameters['logs_directory'])
print('Cleared the current experiments and logs directory successfully!')
else:
raise ValueError(
'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, clear-experiments and clear-everything')
load_data(data_parameters)
# if data_shuffling_flag == True:
# # Here we shuffle the data!
# data_test_train_validation_split(data_parameters['data_directory'], data_parameters['train_percentage'], data_parameters['validation_percentage'])
# update_shuffling_flag('settings.ini')
# # TODO: This might also be a very good point to add cross-validation later
# else:
# if arguments.mode == 'train':
# train(data_parameters, training_parameters,
# network_parameters, misc_parameters)
# elif arguments.mode == 'evaluate-score':
# evaluate_score(training_parameters,
# network_parameters, misc_parameters, evaluation_parameters)
# elif arguments.mode == 'evaluate-mapping':
# logging.basicConfig(filename='evaluate-mapping-error.log')
# if arguments.settings_path is not None:
# settings_evaluation = Settings(arguments.settings_path)
# else:
# settings_evaluation = Settings('settings_evaluation.ini')
# mapping_evaluation_parameters = settings_evaluation['MAPPING']
# evaluate_mapping(mapping_evaluation_parameters)
# elif arguments.mode == 'clear-experiments':
# shutil.rmtree(os.path.join(
# misc_parameters['experiments_directory'], training_parameters['experiment_name']))
# shutil.rmtree(os.path.join(
# misc_parameters['logs_directory'], training_parameters['experiment_name']))
# print('Cleared the current experiments and logs directory successfully!')
# elif arguments.mode == 'clear-everything':
# delete_files(misc_parameters['experiments_directory'])
# delete_files(misc_parameters['logs_directory'])
# print('Cleared the current experiments and logs directory successfully!')
# else:
# raise ValueError(
# 'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, clear-experiments and clear-everything')
......@@ -3,13 +3,13 @@ data_directory = "../well/win-biobank/projects/imaging/data/data3/subjectsAll/"
data_split_flag = False
train_percentage = 90
validation_percentage = 5
train_list = 'train.txt'
validation_list = 'validation.txt'
test_list = 'test.txt'
train_data_file = "/dMRI/autoptx_preproc/tractsNormSummed.nii.gz"
train_output_targets = "/fMRI/rfMRI_25.dr/dr_stage2.nii.gz"
test_data_file = "/dMRI/autoptx_preproc/tractsNormSummed.nii.gz"
test_target_file = "/fMRI/rfMRI_25.dr/dr_stage2.nii.gz"
train_list = "train.txt"
validation_list = "validation.txt"
test_list = "test.txt"
train_data_file = "dMRI/autoptx_preproc/tractsNormSummed.nii.gz"
train_output_targets = "fMRI/rfMRI_25.dr/dr_stage2.nii.gz"
validation_data_file = "dMRI/autoptx_preproc/tractsNormSummed.nii.gz"
validation_target_file = "fMRI/rfMRI_25.dr/dr_stage2.nii.gz"
[TRAINING]
training_batch_size = 5
......
......@@ -563,9 +563,10 @@ def get_datasetsHDF5(data_parameters):
if __name__ == "__main__":
    # No-op entry point: the ad-hoc data-preparation calls
    # (data_test_train_validation_split, directory_reader,
    # tract_sum_generator against a hard-coded subjects directory)
    # were intentionally disabled in this commit; running this module
    # as a script now does nothing.
    pass
\ No newline at end of file
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment