Commit a2b51e13 authored by Andrei-Claudiu Roibu

TRAIN WORKS! + code cleaned, PEP8 formatted, debug comments deleted

parent 98e87284
@@ -209,6 +209,7 @@ class BrainMapperUNet3D(nn.Module):
# DEPRECATED ARCHITECTURES!
class BrainMapperUNet(nn.Module):
"""Architecture class BrainMapper U-net.
@@ -377,6 +378,7 @@ class BrainMapperUNet(nn.Module):
return prediction
class BrainMapperUNet3D_Simple(nn.Module):
"""Architecture class BrainMapper 3D U-net.
......
@@ -131,7 +131,6 @@ def train(data_parameters, training_parameters, network_parameters, misc_parameters
pin_memory=True
)
if training_parameters['use_pre_trained']:
BrainMapperModel = torch.load(training_parameters['pre_trained_path'])
else:
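For context, a minimal sketch (not this repo's code) of the two standard PyTorch ways to restore a pre-trained model. The branch above passes a path straight to torch.load, which matches the whole-model form; the state_dict form needs the architecture constructed first:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                          # stand-in for the real BrainMapper network
torch.save(model, 'whole_model.pth')             # whole-model save: what torch.load above expects
torch.save(model.state_dict(), 'weights.pth')    # weights-only alternative

restored_whole = torch.load('whole_model.pth')   # unpickles the full module; recent PyTorch may need weights_only=False
restored_weights = nn.Linear(4, 2)
restored_weights.load_state_dict(torch.load('weights.pth'))  # architecture must exist before loading weights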
@@ -216,7 +215,7 @@ def evaluate_score(training_parameters, network_parameters, misc_parameters, evaluation_parameters
evaluation_parameters['saved_predictions_directory']
)
- average_dice_score = evaluations.evaluate_dice_score(trained_model_path=evaluation_parameters['trained_model_path'],
+ _ = evaluations.evaluate_dice_score(trained_model_path=evaluation_parameters['trained_model_path'],
number_of_classes=network_parameters['number_of_classes'],
data_directory=evaluation_parameters['data_directory'],
targets_directory=evaluation_parameters[
@@ -357,7 +356,8 @@ if __name__ == '__main__':
if data_shuffling_flag == True:
# Here we shuffle the data!
- data_test_train_validation_split(data_parameters['data_directory'], data_parameters['train_percentage'], data_parameters['validation_percentage'])
+ data_test_train_validation_split(
+ data_parameters['data_directory'], data_parameters['train_percentage'], data_parameters['validation_percentage'])
update_shuffling_flag('settings.ini')
# TODO: This might also be a very good point to add cross-validation later
else:
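The shuffling branch above delegates to data_test_train_validation_split and then records the shuffle in settings.ini. As a rough, hypothetical illustration of what a percentage-based subject split can look like (this is not the repo's implementation; function and variable names below are made up):

import random

def split_subjects(subject_ids, train_percentage, validation_percentage, seed=42):
    # Shuffle once, then carve the list into train / validation / test slices.
    ids = list(subject_ids)
    random.Random(seed).shuffle(ids)
    n_train = int(len(ids) * train_percentage / 100)
    n_validation = int(len(ids) * validation_percentage / 100)
    train = ids[:n_train]
    validation = ids[n_train:n_train + n_validation]
    test = ids[n_train + n_validation:]          # remainder becomes the test set
    return train, validation, test

train, validation, test = split_subjects(range(100), 90, 5)
print(len(train), len(validation), len(test))    # 90 5 5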
@@ -365,27 +365,30 @@ if __name__ == '__main__':
if arguments.mode == 'train':
train(data_parameters, training_parameters,
network_parameters, misc_parameters)
- # elif arguments.mode == 'evaluate-score':
- # evaluate_score(training_parameters,
- # network_parameters, misc_parameters, evaluation_parameters)
- # elif arguments.mode == 'evaluate-mapping':
- # logging.basicConfig(filename='evaluate-mapping-error.log')
- # if arguments.settings_path is not None:
- # settings_evaluation = Settings(arguments.settings_path)
- # else:
- # settings_evaluation = Settings('settings_evaluation.ini')
- # mapping_evaluation_parameters = settings_evaluation['MAPPING']
- # evaluate_mapping(mapping_evaluation_parameters)
- # elif arguments.mode == 'clear-experiments':
- # shutil.rmtree(os.path.join(
- # misc_parameters['experiments_directory'], training_parameters['experiment_name']))
- # shutil.rmtree(os.path.join(
- # misc_parameters['logs_directory'], training_parameters['experiment_name']))
- # print('Cleared the current experiments and logs directory successfully!')
- # elif arguments.mode == 'clear-everything':
- # delete_files(misc_parameters['experiments_directory'])
- # delete_files(misc_parameters['logs_directory'])
- # print('Cleared the current experiments and logs directory successfully!')
- # else:
- # raise ValueError(
- # 'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, clear-experiments and clear-everything')
+ # NOTE: THE EVAL FUNCTIONS HAVE NOT YET BEEN DEBUGGED (16/04/20)
+ elif arguments.mode == 'evaluate-score':
+ evaluate_score(training_parameters,
+ network_parameters, misc_parameters, evaluation_parameters)
+ elif arguments.mode == 'evaluate-mapping':
+ logging.basicConfig(filename='evaluate-mapping-error.log')
+ if arguments.settings_path is not None:
+ settings_evaluation = Settings(arguments.settings_path)
+ else:
+ settings_evaluation = Settings('settings_evaluation.ini')
+ mapping_evaluation_parameters = settings_evaluation['MAPPING']
+ evaluate_mapping(mapping_evaluation_parameters)
+ elif arguments.mode == 'clear-experiments':
+ shutil.rmtree(os.path.join(
+ misc_parameters['experiments_directory'], training_parameters['experiment_name']))
+ shutil.rmtree(os.path.join(
+ misc_parameters['logs_directory'], training_parameters['experiment_name']))
+ print('Cleared the current experiments and logs directory successfully!')
+ elif arguments.mode == 'clear-everything':
+ delete_files(misc_parameters['experiments_directory'])
+ delete_files(misc_parameters['logs_directory'])
+ print('Cleared the current experiments and logs directory successfully!')
+ else:
+ raise ValueError(
+ 'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, clear-experiments and clear-everything')
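The chain of elif branches above dispatches on arguments.mode and falls through to a ValueError for unknown modes. A compact, hypothetical alternative sketch: letting the argument parser constrain the value up front makes the final else unreachable (the parser below is an assumption; this hunk does not show how arguments is actually built):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--mode', required=True,
                    choices=['train', 'evaluate-score', 'evaluate-mapping',
                             'clear-experiments', 'clear-everything'])
parser.add_argument('--settings_path', default=None)
arguments = parser.parse_args(['--mode', 'train'])   # example invocation

handlers = {
    'train': lambda: print('would call train(...)'),                     # placeholder handlers
    'evaluate-score': lambda: print('would call evaluate_score(...)'),
    'evaluate-mapping': lambda: print('would call evaluate_mapping(...)'),
    'clear-experiments': lambda: print('would clear the experiment directories'),
    'clear-everything': lambda: print('would clear experiments and logs'),
}
handlers[arguments.mode]()   # invalid values are already rejected by choices=...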
@@ -117,7 +117,6 @@ class Solver():
if use_last_checkpoint:
self.load_checkpoint()
def train(self, train_loader, test_loader):
"""Training Function
@@ -143,7 +142,8 @@ class Solver():
print('=====================')
print('Model Name: {}'.format(self.model_name))
if torch.cuda.is_available():
- print('Device Type: {}'.format(torch.cuda.get_device_name(self.device)))
+ print('Device Type: {}'.format(
+ torch.cuda.get_device_name(self.device)))
else:
print('Device Type: {}'.format(self.device))
start_time = datetime.now()
@@ -159,8 +159,6 @@ class Solver():
print('-> Phase: {}'.format(phase))
losses = []
- # outputs = []
- # y_values = []
if phase == 'train':
model.train()
@@ -173,8 +171,8 @@ class Solver():
y = sampled_batch[1].type(torch.FloatTensor)
# We add an extra dimension (~ number of channels) for the 3D convolutions.
- X = torch.unsqueeze(X, dim= 1)
- y = torch.unsqueeze(y, dim= 1)
+ X = torch.unsqueeze(X, dim=1)
+ y = torch.unsqueeze(y, dim=1)
if model.test_if_cuda:
X = X.cuda(self.device, non_blocking=True)
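The unsqueeze calls above add the singleton channel axis that nn.Conv3d expects, turning (N, D, H, W) batches into (N, 1, D, H, W). A tiny shape check (the volume size is made up):

import torch

X = torch.randn(2, 32, 32, 32)   # hypothetical batch of 3D volumes: (N, D, H, W)
X = torch.unsqueeze(X, dim=1)    # -> (N, C=1, D, H, W), the layout nn.Conv3d expects
print(X.shape)                   # torch.Size([2, 1, 32, 32, 32])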
@@ -196,11 +194,7 @@ class Solver():
iteration += 1
losses.append(loss.item())
- # outputs.append(torch.max(y_hat, dim=1)[1].cpu())
- # y_values.append(y.cpu())
# Clear the memory
@@ -214,26 +208,9 @@ class Solver():
print("100%", flush=True)
with torch.no_grad():
- # output_array, y_array = torch.cat(
- # outputs), torch.cat(y_values)
self.LogWriter.loss_per_epoch(losses, phase, epoch)
- # dice_score_mean = self.LogWriter.dice_score_per_epoch(
- # phase, output_array, y_array, epoch)
- # if phase == 'test':
- # if dice_score_mean > self.best_mean_score:
- # self.best_mean_score = dice_score_mean
- # self.best_mean_score_epoch = epoch
- # index = np.random.choice(
- # len(dataloaders[phase].dataset.X), size=3, replace=False)
- # self.LogWriter.sample_image_per_epoch(prediction=model.predict(dataloaders[phase].dataset.X[index], self.device),
- # ground_truth=dataloaders[phase].dataset.y[index],
- # phase=phase,
- # epoch=epoch)
print("Epoch {}/{} DONE!".format(epoch, self.number_epochs))
self.save_checkpoint(state={'epoch': epoch + 1,
......
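The save_checkpoint call above is cut off by the collapsed diff, so only the 'epoch' field is visible. For orientation, a generic, hedged sketch of the usual PyTorch checkpoint pattern (every field beyond 'epoch' is an assumption, not this repo's actual state dict):

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)                        # stand-in model
optimizer = optim.Adam(model.parameters())

state = {'epoch': 1,                           # 'epoch' appears in the hunk; the rest is assumed
         'state_dict': model.state_dict(),
         'optimizer': optimizer.state_dict()}
torch.save(state, 'checkpoint_epoch_1.pth.tar')

checkpoint = torch.load('checkpoint_epoch_1.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])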
@@ -560,13 +560,3 @@ def get_datasetsHDF5(data_parameters):
training_labels['label'][()]),
DataMapperHDF5(testing_data['data'][()], testing_labels['label'][()])
)
if __name__ == "__main__":
pass
# folder_location = "../well/win-biobank/projects/imaging/data/data3/subjectsAll/"
# # data_test_train_validation_split(folder_location, 90, 5)
# subDirectoryList = directory_reader(folder_location, write_txt=True)
# print(subDirectoryList)
# tract_sum_generator(folder_location)
\ No newline at end of file
@@ -81,6 +81,7 @@ class MSELoss(_WeightedLoss):
# NOTE: THESE LOSSES ARE USUALLY USED FOR CLASSIFICATION TASKS.
# THIS IS NOT A CLASSIFICATION TASK. THUS, THESE ARE IGNORED FOR NOW!
class CrossEntropyLoss(_WeightedLoss):
"""Cross Entropy Loss
......
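The note above marks CrossEntropyLoss as a classification loss and keeps MSELoss for this regression-style mapping task. The difference in a few lines, using PyTorch's built-in losses (tensor shapes are illustrative only):

import torch
import torch.nn as nn

# Regression-style pairing: continuous prediction vs. continuous target, as MSELoss expects.
prediction = torch.randn(2, 1, 4, 4, 4)
target = torch.randn(2, 1, 4, 4, 4)
print(nn.MSELoss()(prediction, target))

# Classification-style pairing: class scores vs. integer labels, as CrossEntropyLoss expects.
logits = torch.randn(2, 5)                    # batch of 2 samples, 5 classes
labels = torch.randint(0, 5, (2,))
print(nn.CrossEntropyLoss()(logits, labels))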