"""Data Logging Functions

Description:

    This module contains several functions which, either on their own or
    included in larger pieces of software, perform data logging tasks.

Usage:

    To use content from this module, import the functions and instantiate
    them as you wish to use them:

        from utils.data_logging_utils import function_name

"""

15
import os
16
import matplotlib
17
18
19
import matplotlib.pyplot as plt
import shutil
import logging
20
import numpy as np
21
import re
22
from textwrap import wrap
23
import torch
24
25
26

# The SummaryWriter class provides a high-level API to create an event file in a given directory and add summaries and events to it.
# More here: https://tensorboardx.readthedocs.io/en/latest/tensorboard.html
27

28
from tensorboardX import SummaryWriter
29

30
31
import utils.data_evaluation_utils as evaluation

32
33
plt.axis('scaled')

34

35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
class LogWriter():

    """Log Writer class for the BrainMapper U-net.

    This class contains the pytorch implementation of the several logging functions required for the BrainMapper project.
    These functions are designed to keep track of progress during training, and also aid debugging.

    Args:
        number_of_classes (int): Number of classes
        logs_directory (str): Directory for outputing training logs
        experiment_name (str): Name of the experiment
        use_last_checkpoint (bool): Flag for loading the previous checkpoint
        labels (arr): Vector/Array of labels (if applicable)
        confusion_matrix_cmap (class): Colour Map to be used for the Conusion Matrix
    """

51
52
    def __init__(self, number_of_classes, logs_directory, experiment_name, use_last_checkpoint=False, labels=None, confusion_matrix_cmap=plt.cm.Blues):

53
        self.number_of_classes = number_of_classes
54
55
        training_logs_directory = os.path.join(
            logs_directory, experiment_name, "train")
56
57
        validation_logs_directory = os.path.join(
            logs_directory, experiment_name, "validation")
58
59
60
61
62

        # If the logs directory exist, we clear their contents to allow new logs to be created
        if not use_last_checkpoint:
            if os.path.exists(training_logs_directory):
                shutil.rmtree(training_logs_directory)
63
64
            if os.path.exists(validation_logs_directory):
                shutil.rmtree(validation_logs_directory)
65
66

        self.log_writer = {
67
68
            'train': SummaryWriter(logdir=training_logs_directory),
            'validation': SummaryWriter(logdir=validation_logs_directory)
69
        }
70
71
72
73
74

        self.confusion_matrix_color_map = confusion_matrix_cmap

        self.current_iteration = 1

Andrei Roibu's avatar
Andrei Roibu committed
75
        self.labels = ['rsfMRI']
76
77

        self.logger = logging.getLogger()
78
79
        file_handler = logging.FileHandler(
            "{}/{}.log".format(os.path.join(logs_directory, experiment_name), "console_logs"))
80
81
        self.logger.addHandler(file_handler)

82
83
84
85
86
87
88
89
90
    def log(self, message):
        """Log function

        This function logs a message in the logger.

        Args:
            message (str): Message to be logged
        """

91
        self.logger.info(msg=message)
92

93
    def loss_per_iteration(self, loss_per_iteration, batch_index, iteration):
94
        """Log of loss / iteration
95

96
97
98
99
100
        This function records the loss for every iteration.

        Args:
            loss_per_iteration (torch.tensor): Value of loss for every iteration step
            batch_index (int): Index of current batch
101
            iteration (int): Current iteration value
102
103
        """

104
105
106
        print("Loss for Iteration {} is: {}".format(
            batch_index, loss_per_iteration))
        self.log_writer['train'].add_scalar(
Andrei-Claudiu Roibu's avatar
Andrei-Claudiu Roibu committed
107
            'loss/iteration', loss_per_iteration, iteration)
108

109
    def loss_per_epoch(self, losses, phase, epoch, previous_loss=None):
110
111
112
113
114
115
116
117
        """Log function

        This function records the loss for every epoch.

        Args:
            losses (list): Values of all the losses recorded during the training epoch
            phase (str): Current run mode or phase
            epoch (int): Current epoch value
118
            previous_loss(float): Value of the previous epoch's loss
119
120
        """

121
122
        loss = np.mean(losses)

123
        if phase == 'train':
124
125
            # loss = losses[-1]
            print("Loss for Epoch {} of {} is: {}".format(epoch, phase, loss))
126
        else:
127
128
129
130
131
            # loss = np.mean(losses)
            if previous_loss == None:
                print("Loss for Epoch {} of {} is: {}".format(epoch, phase, loss))
            else:
                print("Loss for Epoch {} of {} is {} and Absolute Change is {}".format(epoch, phase, loss, previous_loss - loss))
132

Andrei-Claudiu Roibu's avatar
Andrei-Claudiu Roibu committed
133
        self.log_writer[phase].add_scalar('loss/epoch', loss, epoch)
134

Andrei Roibu's avatar
Andrei Roibu committed
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161

    def MSE_per_epoch(self, losses, phase, epoch, previous_loss=None):
        """Log function

        This function records the loss for every epoch.

        Args:
            losses (list): Values of all the losses recorded during the training epoch
            phase (str): Current run mode or phase
            epoch (int): Current epoch value
            previous_loss(float): Value of the previous epoch's loss
        """

        loss = np.mean(losses)

        if phase == 'train':
            # loss = losses[-1]
            print("MSE for Epoch {} of {} is: {}".format(epoch, phase, loss))
        else:
            # loss = np.mean(losses)
            if previous_loss == None:
                print("MSE for Epoch {} of {} is: {}".format(epoch, phase, loss))
            else:
                print("MSE for Epoch {} of {} is {} and Absolute Change is {}".format(epoch, phase, loss, previous_loss - loss))

        self.log_writer[phase].add_scalar('MSE/epoch', loss, epoch)

Andrei-Claudiu Roibu's avatar
Andrei-Claudiu Roibu committed
162
163
    def close(self):
        """Close the log writer
164

Andrei-Claudiu Roibu's avatar
Andrei-Claudiu Roibu committed
165
166
        This function closes the two log writers.
        """
167

Andrei-Claudiu Roibu's avatar
Andrei-Claudiu Roibu committed
168
169
170
171
172
173
174
175
176
        self.log_writer['train'].close()
        self.log_writer['validation'].close()

    def add_graph(self, model):
        """Produces network graph

        This function produces the network graph

        NOTE: Currently, the function suffers from bugs and is not implemented.
177

178
        Args:
Andrei-Claudiu Roibu's avatar
Andrei-Claudiu Roibu committed
179
            model (torch.nn.Module): Model to draw.
180
181
        """

Andrei-Claudiu Roibu's avatar
Andrei-Claudiu Roibu committed
182
        self.log_writer['train'].add_graph(model)