"""Data Logging Functions

Description:

    This module contains several functions which, either on their own or included in larger pieces of software, perform data logging tasks.

Usage:

    To use content from this module, import the functions and instantiate them as you wish to use them:

        from utils.data_logging_utils import function_name

"""


import os
import logging
import shutil

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch

# The SummaryWriter class provides a high-level API to create an event file in a given directory and add summaries and events to it.
# More here: https://tensorboardx.readthedocs.io/en/latest/tensorboard.html
from tensorboardX import SummaryWriter

import utils.data_evaluation_utils as evaluation

plt.axis('scaled')

class LogWriter():

    """Log Writer class for the BrainMapper U-net.

    This class contains the pytorch implementation of the several logging functions required for the BrainMapper project.
    These functions are designed to keep track of progress during training, and also aid debugging.

    Args:
        number_of_classes (int): Number of classes
        logs_directory (str): Directory for outputting training logs
        experiment_name (str): Name of the experiment
        use_last_checkpoint (bool): Flag for loading the previous checkpoint
        labels (arr): Vector/Array of labels (if applicable); defaults to ['rsfMRI'] when omitted
        confusion_matrix_cmap (class): Colour Map to be used for the Confusion Matrix; defaults to plt.cm.Blues
    """

    def __init__(self, number_of_classes, logs_directory, experiment_name, use_last_checkpoint=False, labels=None, confusion_matrix_cmap=None):

        self.number_of_classes = number_of_classes

        training_logs_directory = os.path.join(
            logs_directory, experiment_name, "train")
        validation_logs_directory = os.path.join(
            logs_directory, experiment_name, "validation")

        # NOTE(review): clearing pre-existing log directories on a fresh run
        # (when use_last_checkpoint is False) was previously disabled in this
        # codebase; re-enable with shutil.rmtree on the two directories above
        # if stale TensorBoard events become a problem.

        # One SummaryWriter per phase, each writing TensorBoard event files
        # into its own sub-directory of the experiment folder.
        self.log_writer = {
            'train': SummaryWriter(logdir=training_logs_directory),
            'validation': SummaryWriter(logdir=validation_logs_directory),
        }

        # Resolve the colour map here rather than in the signature, so the
        # default plt.cm.Blues is not evaluated at class-definition time.
        # Callers relying on the old default are unaffected.
        self.confusion_matrix_color_map = (
            confusion_matrix_cmap if confusion_matrix_cmap is not None else plt.cm.Blues)

        self.current_iteration = 1

        # Honour caller-supplied labels; fall back to the single rsfMRI label
        # (the previously hard-coded value) when none are given.
        self.labels = labels if labels is not None else ['rsfMRI']

        # Mirror log() calls into a per-experiment file via the root logger.
        self.logger = logging.getLogger()

        file_handler = logging.FileHandler(
            "{}/{}.log".format(os.path.join(logs_directory, experiment_name), "console_logs"))

        self.logger.addHandler(file_handler)

    def log(self, message):
        """Log function

        This function logs a message in the logger.

        Args:
            message (str): Message to be logged
        """

        self.logger.info(msg=message)

    def loss_per_iteration(self, loss_per_iteration, batch_index, iteration):
        """Log of loss / iteration

        This function records the loss for every iteration.

        Args:
            loss_per_iteration (torch.tensor): Value of loss for every iteration step
            batch_index (int): Index of current batch
            iteration (int): Current iteration value
        """

        print("Loss for Iteration {} is: {}".format(
            batch_index, loss_per_iteration))
        self.log_writer['train'].add_scalar(
            'loss/iteration', loss_per_iteration, iteration)

    def _metric_per_epoch(self, display_name, scalar_tag, losses, phase, epoch, previous_loss=None):
        """Shared epoch-level recorder for loss_per_epoch and MSE_per_epoch.

        Averages the per-batch values, prints a progress message, and writes
        the mean to the TensorBoard writer of the given phase.

        Args:
            display_name (str): Human-readable metric name used in the console message
            scalar_tag (str): TensorBoard tag prefix (e.g. 'loss' -> 'loss/epoch')
            losses (list): Values of all the losses recorded during the epoch
            phase (str): Current run mode or phase ('train' or 'validation')
            epoch (int): Current epoch value
            previous_loss (float): Value of the previous epoch's loss (validation only)
        """

        loss = np.mean(losses)

        # The absolute-change message is only produced for non-train phases
        # that have a previous value to compare against (matches the original
        # per-metric implementations).
        if phase == 'train' or previous_loss is None:
            print("{} for Epoch {} of {} is: {}".format(
                display_name, epoch, phase, loss))
        else:
            print("{} for Epoch {} of {} is {} and Absolute Change is {}".format(
                display_name, epoch, phase, loss, previous_loss - loss))

        self.log_writer[phase].add_scalar(
            '{}/epoch'.format(scalar_tag), loss, epoch)

    def loss_per_epoch(self, losses, phase, epoch, previous_loss=None):
        """Log function

        This function records the mean loss for every epoch.

        Args:
            losses (list): Values of all the losses recorded during the training epoch
            phase (str): Current run mode or phase
            epoch (int): Current epoch value
            previous_loss (float): Value of the previous epoch's loss
        """

        self._metric_per_epoch('Loss', 'loss', losses, phase, epoch, previous_loss)

    def MSE_per_epoch(self, losses, phase, epoch, previous_loss=None):
        """Log function

        This function records the mean MSE for every epoch.

        Args:
            losses (list): Values of all the losses recorded during the training epoch
            phase (str): Current run mode or phase
            epoch (int): Current epoch value
            previous_loss (float): Value of the previous epoch's loss
        """

        self._metric_per_epoch('MSE', 'MSE', losses, phase, epoch, previous_loss)

    def close(self):
        """Close the log writer

        This function closes the two log writers.
        """

        self.log_writer['train'].close()
        self.log_writer['validation'].close()

    def add_graph(self, model):
        """Produces network graph

        This function produces the network graph.

        NOTE: Currently, the function suffers from bugs and is not implemented.

        Args:
            model (torch.nn.Module): Model to draw.
        """

        self.log_writer['train'].add_graph(model)