Commit 6f01a19e authored by Andrei-Claudiu Roibu

added call for test/train/val split

parent cb210299
@@ -39,7 +39,7 @@ import torch.utils.data as data
from solver import Solver
from BrainMapperUNet import BrainMapperUNet
from utils.data_utils import get_datasets
from utils.data_utils import get_datasets, data_test_train_validation_split
import utils.data_evaluation_utils as evaluations
from utils.data_logging_utils import LogWriter
@@ -201,6 +201,8 @@ def evaluate_score(training_parameters, network_parameters, misc_parameters, eva
}
"""
################# TODO - NEED TO UPDATE THE DATA FUNCTIONS!
logWriter = LogWriter(number_of_classes=network_parameters['number_of_classes'],
logs_directory=misc_parameters['logs_directory'],
experiment_name=training_parameters['experiment_name']
@@ -348,30 +350,39 @@ if __name__ == '__main__':
misc_parameters = settings['MISC']
evaluation_parameters = settings['EVALUATION']
if arguments.mode == 'train':
train(data_parameters, training_parameters,
network_parameters, misc_parameters)
elif arguments.mode == 'evaluate-score':
evaluate_score(training_parameters,
network_parameters, misc_parameters, evaluation_parameters)
elif arguments.mode == 'evaluate-mapping':
logging.basicConfig(filename='evaluate-mapping-error.log')
if arguments.settings_path is not None:
settings_evaluation = Settings(arguments.settings_path)
else:
settings_evaluation = Settings('settings_evaluation.ini')
mapping_evaluation_parameters = settings_evaluation['MAPPING']
evaluate_mapping(mapping_evaluation_parameters)
elif arguments.mode == 'clear-experiments':
shutil.rmtree(os.path.join(
misc_parameters['experiments_directory'], training_parameters['experiment_name']))
shutil.rmtree(os.path.join(
misc_parameters['logs_directory'], training_parameters['experiment_name']))
print('Cleared the current experiments and logs directory successfully!')
elif arguments.mode == 'clear-everything':
delete_files(misc_parameters['experiments_directory'])
delete_files(misc_parameters['logs_directory'])
print('Cleared the current experiments and logs directory successfully!')
data_shuffling_flag = data_parameters['data_split_flag']
if data_shuffling_flag == True:
# Here we shuffle the data!
data_test_train_validation_split(data_parameters['data_directory'], data_parameters['train_percentage'], data_parameters['validation_percentage'])
# TODO: This might also be a very good point to add cross-validation later
else:
raise ValueError(
'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, clear-experiments and clear-everything')
if arguments.mode == 'train':
train(data_parameters, training_parameters,
network_parameters, misc_parameters)
elif arguments.mode == 'evaluate-score':
evaluate_score(training_parameters,
network_parameters, misc_parameters, evaluation_parameters)
elif arguments.mode == 'evaluate-mapping':
logging.basicConfig(filename='evaluate-mapping-error.log')
if arguments.settings_path is not None:
settings_evaluation = Settings(arguments.settings_path)
else:
settings_evaluation = Settings('settings_evaluation.ini')
mapping_evaluation_parameters = settings_evaluation['MAPPING']
evaluate_mapping(mapping_evaluation_parameters)
elif arguments.mode == 'clear-experiments':
shutil.rmtree(os.path.join(
misc_parameters['experiments_directory'], training_parameters['experiment_name']))
shutil.rmtree(os.path.join(
misc_parameters['logs_directory'], training_parameters['experiment_name']))
print('Cleared the current experiments and logs directory successfully!')
elif arguments.mode == 'clear-everything':
delete_files(misc_parameters['experiments_directory'])
delete_files(misc_parameters['logs_directory'])
print('Cleared the current experiments and logs directory successfully!')
else:
raise ValueError(
'Invalid mode value! Only supports: train, evaluate-score, evaluate-mapping, clear-experiments and clear-everything')
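
For context, the newly imported data_test_train_validation_split comes from utils.data_utils and its body is not part of this commit. Below is a minimal sketch of what such a helper might look like, assuming it shuffles the subject folders found in data_directory and writes train/validation/test subject lists; the output file names and the integer-percentage semantics are assumptions for illustration, not the repository's actual implementation.

import os
import random

def data_test_train_validation_split(data_directory, train_percentage, validation_percentage):
    """Sketch only: shuffle subjects in data_directory and write split lists.

    Assumptions (not taken from the repository): each subject is a folder in
    data_directory, percentages are integers out of 100, and the splits are
    saved as train.txt / validation.txt / test.txt alongside the data.
    """
    subjects = sorted(os.listdir(data_directory))
    random.shuffle(subjects)

    train_size = int(len(subjects) * train_percentage / 100)
    validation_size = int(len(subjects) * validation_percentage / 100)

    splits = {
        'train.txt': subjects[:train_size],
        'validation.txt': subjects[train_size:train_size + validation_size],
        'test.txt': subjects[train_size + validation_size:],
    }

    for file_name, subject_list in splits.items():
        with open(os.path.join(data_directory, file_name), 'w') as subject_file:
            subject_file.write('\n'.join(subject_list))

Under these assumptions, anything not covered by train_percentage and validation_percentage falls into the test split, matching how the two percentages are read from data_parameters in the __main__ block above.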