Commit 92f1b83d authored by Andrei-Claudiu Roibu's avatar Andrei-Claudiu Roibu 🖥
Browse files

Merge branch 'autoencoder' into 'master'

Autoencoder

See merge request !2
parents bd5515dc 159001d8
......@@ -120,4 +120,7 @@ datasets/
files.txt
jobscript.sge.sh
*.nii.gz
stuff/
\ No newline at end of file
stuff/
test/*
.DS_Store
logs/
......@@ -18,6 +18,7 @@ import numpy as np
import torch
import torch.nn as nn
import utils.modules as modules
from torch.nn.init import _calculate_fan_in_and_fan_out as calculate_fan
class BrainMapperAE3D(nn.Module):
......@@ -80,6 +81,10 @@ class BrainMapperAE3D(nn.Module):
self.transformerBlock4 = modules.ResNetBlock3D(parameters)
self.transformerBlock5 = modules.ResNetBlock3D(parameters)
self.transformerBlock6 = modules.ResNetBlock3D(parameters)
self.transformerBlock7 = modules.ResNetBlock3D(parameters)
self.transformerBlock8 = modules.ResNetBlock3D(parameters)
self.transformerBlock9 = modules.ResNetBlock3D(parameters)
self.transformerBlock10 = modules.ResNetBlock3D(parameters)
# Decoder
......@@ -125,6 +130,10 @@ class BrainMapperAE3D(nn.Module):
X = self.transformerBlock4.forward(X)
X = self.transformerBlock5.forward(X)
X = self.transformerBlock6.forward(X)
X = self.transformerBlock7.forward(X)
X = self.transformerBlock8.forward(X)
X = self.transformerBlock9.forward(X)
X = self.transformerBlock10.forward(X)
# Decoder
......@@ -214,5 +223,10 @@ class BrainMapperAE3D(nn.Module):
for _, subsubmodule in submodule.named_children():
if isinstance(subsubmodule, (torch.nn.PReLU, torch.nn.Dropout3d, torch.nn.MaxPool3d)) == False:
subsubmodule.reset_parameters()
# if isinstance(subsubmodule, (torch.nn.Conv3d, torch.nn.ConvTranspose3d)):
# gain = np.sqrt(np.divide(2, 1 + np.power(0.25, 2)))
# fan, _ = calculate_fan(subsubmodule.weight)
# std = np.divide(gain, np.sqrt(fan))
# subsubmodule.weight.data.normal_(0, std)
print("Initialized network parameters!")
\ No newline at end of file
print("Initialized network parameters!")
"""Brain Mapper U-Net Architecture
Description:
This folder contains the Pytorch implementation of the core U-net architecture.
This arcitecture predicts functional connectivity rsfMRI from structural connectivity information from dMRI.
Usage:
To use this module, import it and instantiate is as you wish:
from BrainMapperUNet import BrainMapperUNet
deep_learning_model = BrainMapperUnet(parameters)
"""
import numpy as np
import torch
import torch.nn as nn
import utils.modules as modules
class BrainMapperUNet3D(nn.Module):
    """Architecture class for Traditional BrainMapper 3D U-net.

    This class contains the pytorch implementation of the U-net architecture
    underpinning the BrainMapper project.

    Args:
        parameters (dict): Contains information relevant parameters
        parameters = {
            'kernel_heigth': 5
            'kernel_width': 5
            'kernel_depth': 5
            'kernel_classification': 1
            'input_channels': 1
            'output_channels': 64
            'convolution_stride': 1
            'dropout': 0.2
            'pool_kernel_size': 2
            'pool_stride': 2
            'up_mode': 'upconv'
            'number_of_classes': 1
        }

    Returns:
        probability_map (torch.tensor): Output forward passed tensor through the U-net block
    """

    def __init__(self, parameters):
        super(BrainMapperUNet3D, self).__init__()

        # The shared `parameters` dict is mutated while the network is built so
        # each block sees the correct channel counts; the caller's original
        # values are restored at the end so the dict can be reused.
        original_input_channels = parameters['input_channels']
        original_output_channels = parameters['output_channels']

        # Encoder path: the number of channels doubles at every level.
        self.encoderBlock1 = modules.EncoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] * 2
        self.encoderBlock2 = modules.EncoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] * 2
        self.encoderBlock3 = modules.EncoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] * 2
        self.encoderBlock4 = modules.EncoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] * 2

        self.bottleneck = modules.ConvolutionalBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] // 2

        # Decoder path: the number of channels halves at every level.
        self.decoderBlock1 = modules.DecoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] // 2
        self.decoderBlock2 = modules.DecoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] // 2
        self.decoderBlock3 = modules.DecoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        parameters['output_channels'] = parameters['output_channels'] // 2
        self.decoderBlock4 = modules.DecoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']

        self.classifier = modules.ClassifierBlock3D(parameters)

        # Restore the caller's settings.
        parameters['input_channels'] = original_input_channels
        parameters['output_channels'] = original_output_channels

    def forward(self, X):
        """Forward pass for 3D U-net

        Function computing the forward pass through the 3D U-Net
        The input to the function is the dMRI map

        Args:
            X (torch.tensor): Input dMRI map, shape = (N x C x D x H x W)

        Returns:
            probability_map (torch.tensor): Output forward passed tensor through the U-net block
        """
        # Encoder: keep the second return value (Y_np*) of each block for the
        # skip connections; free each intermediate as soon as it is consumed
        # to reduce peak GPU memory.
        # Blocks are invoked via __call__ rather than .forward() so that any
        # registered hooks are honoured (as recommended by PyTorch).
        Y_encoder_1, Y_np1, _ = self.encoderBlock1(X)
        Y_encoder_2, Y_np2, _ = self.encoderBlock2(Y_encoder_1)
        del Y_encoder_1
        Y_encoder_3, Y_np3, _ = self.encoderBlock3(Y_encoder_2)
        del Y_encoder_2
        Y_encoder_4, Y_np4, _ = self.encoderBlock4(Y_encoder_3)
        del Y_encoder_3

        Y_bottleNeck = self.bottleneck(Y_encoder_4)
        del Y_encoder_4

        # Decoder: each block consumes the previous output plus the matching
        # skip tensor from the encoder.
        Y_decoder_1 = self.decoderBlock1(Y_bottleNeck, Y_np4)
        del Y_bottleNeck, Y_np4
        Y_decoder_2 = self.decoderBlock2(Y_decoder_1, Y_np3)
        del Y_decoder_1, Y_np3
        Y_decoder_3 = self.decoderBlock3(Y_decoder_2, Y_np2)
        del Y_decoder_2, Y_np2
        Y_decoder_4 = self.decoderBlock4(Y_decoder_3, Y_np1)
        del Y_decoder_3, Y_np1

        probability_map = self.classifier(Y_decoder_4)
        del Y_decoder_4

        return probability_map

    def save(self, path):
        """Model Saver

        Function saving the model with all its parameters to a given path.
        The path must end with a *.model argument.

        Args:
            path (str): Path string
        """
        print(f"Saving Model... {path}")
        torch.save(self, path)

    @property
    def test_if_cuda(self):
        """Cuda Test

        This function tests if the model parameters are allocated to a CUDA enabled GPU.

        Returns:
            bool: Flag indicating True if the tensor is stored on the GPU and False otherwise
        """
        return next(self.parameters()).is_cuda

    def predict(self, X, device=0):
        """Post-training Output Prediction

        This function predicts the output of the U-net post-training

        Args:
            X (torch.tensor): input dMRI volume
            device (int/str): Device type used for training (int - GPU id, str- CPU)

        Returns:
            prediction (ndarray): predicted output after training
        """
        self.eval()  # PyTorch module call setting the network to evaluation mode

        if isinstance(X, np.ndarray):
            X = torch.tensor(X, requires_grad=False).type(torch.FloatTensor)
        elif isinstance(X, torch.Tensor) and not X.is_cuda:
            # .cuda() transfers the tensor from the CPU to the GPU when needed.
            # The non_blocking argument lets the caller bypass synchronization
            # when possible.
            X = X.type(torch.FloatTensor).cuda(device, non_blocking=True)

        with torch.no_grad():  # No gradients are tracked during inference
            output = self.forward(X)

        # Channel-wise argmax; .data detaches, then the indices are mapped to
        # the CPU as an ndarray and squeezed of singleton dimensions.
        _, idx = torch.max(output, 1)
        idx = idx.data.cpu().numpy()
        prediction = np.squeeze(idx)

        del X, output, idx

        return prediction

    def reset_parameters(self):
        """Parameter Initialization

        This function (re)initializes the parameters of the defined network.
        This function is a wrapper for the reset_parameters() function defined for each module.
        More information can be found here: https://discuss.pytorch.org/t/what-is-the-default-initialization-of-a-conv2d-layer-and-linear-layer/16055 + https://discuss.pytorch.org/t/how-to-reset-model-weights-to-effectively-implement-crossvalidation/53859
        An alternative (re)initialization method is described here: https://discuss.pytorch.org/t/how-to-reset-variables-values-in-nn-modules/32639
        """
        print("Initializing network parameters...")

        for _, module in self.named_children():
            for _, submodule in module.named_children():
                for _, subsubmodule in submodule.named_children():
                    # Skip layer types that are not expected to provide a
                    # reset_parameters() method.
                    if not isinstance(subsubmodule, (torch.nn.PReLU, torch.nn.Dropout3d, torch.nn.MaxPool3d)):
                        subsubmodule.reset_parameters()

        print("Initialized network parameters!")
class BrainMapperCompResUNet3D(nn.Module):
    """Architecture class for Competitive Residual DenseBlock BrainMapper 3D U-net.

    This class contains the pytorch implementation of the U-net architecture
    underpinning the BrainMapper project.

    Args:
        parameters (dict): Contains information relevant parameters
        parameters = {
            'kernel_heigth': 5
            'kernel_width': 5
            'kernel_depth': 5
            'kernel_classification': 1
            'input_channels': 1
            'output_channels': 64
            'convolution_stride': 1
            'dropout': 0.2
            'pool_kernel_size': 2
            'pool_stride': 2
            'up_mode': 'upconv'
            'number_of_classes': 1
        }

    Returns:
        probability_map (torch.tensor): Output forward passed tensor through the U-net block
    """

    def __init__(self, parameters):
        super(BrainMapperCompResUNet3D, self).__init__()

        # The shared `parameters` dict is mutated once (input = output channels
        # after the first block); the caller's original values are restored at
        # the end so the dict can be reused.
        original_input_channels = parameters['input_channels']
        original_output_channels = parameters['output_channels']

        # Encoder path: after the input block, all blocks keep the same width.
        self.encoderBlock1 = modules.InCompDensEncoderBlock3D(parameters)
        parameters['input_channels'] = parameters['output_channels']
        self.encoderBlock2 = modules.CompDensEncoderBlock3D(parameters)
        self.encoderBlock3 = modules.CompDensEncoderBlock3D(parameters)
        self.encoderBlock4 = modules.CompDensEncoderBlock3D(parameters)

        self.bottleneck = modules.CompDensBlock3D(parameters)

        # Decoder path.
        self.decoderBlock1 = modules.CompDensDecoderBlock3D(parameters)
        self.decoderBlock2 = modules.CompDensDecoderBlock3D(parameters)
        self.decoderBlock3 = modules.CompDensDecoderBlock3D(parameters)
        self.decoderBlock4 = modules.CompDensDecoderBlock3D(parameters)

        self.classifier = modules.CompDensClassifierBlock3D(parameters)

        # Restore the caller's settings.
        parameters['input_channels'] = original_input_channels
        parameters['output_channels'] = original_output_channels

    def forward(self, X):
        """Forward pass for 3D U-net

        Function computing the forward pass through the 3D U-Net
        The input to the function is the dMRI map

        Args:
            X (torch.tensor): Input dMRI map, shape = (N x C x D x H x W)

        Returns:
            probability_map (torch.tensor): Output forward passed tensor through the U-net block
        """
        # Encoder: keep the second return value (Y_np*) of each block for the
        # skip connections; free each intermediate as soon as it is consumed
        # to reduce peak GPU memory.
        # Blocks are invoked via __call__ rather than .forward() so that any
        # registered hooks are honoured (as recommended by PyTorch).
        Y_encoder_1, Y_np1, _ = self.encoderBlock1(X)
        Y_encoder_2, Y_np2, _ = self.encoderBlock2(Y_encoder_1)
        del Y_encoder_1
        Y_encoder_3, Y_np3, _ = self.encoderBlock3(Y_encoder_2)
        del Y_encoder_2
        Y_encoder_4, Y_np4, _ = self.encoderBlock4(Y_encoder_3)
        del Y_encoder_3

        Y_bottleNeck = self.bottleneck(Y_encoder_4)
        del Y_encoder_4

        # Decoder: each block consumes the previous output plus the matching
        # skip tensor from the encoder.
        Y_decoder_1 = self.decoderBlock1(Y_bottleNeck, Y_np4)
        del Y_bottleNeck, Y_np4
        Y_decoder_2 = self.decoderBlock2(Y_decoder_1, Y_np3)
        del Y_decoder_1, Y_np3
        Y_decoder_3 = self.decoderBlock3(Y_decoder_2, Y_np2)
        del Y_decoder_2, Y_np2
        Y_decoder_4 = self.decoderBlock4(Y_decoder_3, Y_np1)
        del Y_decoder_3, Y_np1

        probability_map = self.classifier(Y_decoder_4)
        del Y_decoder_4

        return probability_map

    def save(self, path):
        """Model Saver

        Function saving the model with all its parameters to a given path.
        The path must end with a *.model argument.

        Args:
            path (str): Path string
        """
        print(f"Saving Model... {path}")
        torch.save(self, path)

    @property
    def test_if_cuda(self):
        """Cuda Test

        This function tests if the model parameters are allocated to a CUDA enabled GPU.

        Returns:
            bool: Flag indicating True if the tensor is stored on the GPU and False otherwise
        """
        return next(self.parameters()).is_cuda

    def predict(self, X, device=0):
        """Post-training Output Prediction

        This function predicts the output of the U-net post-training

        Args:
            X (torch.tensor): input dMRI volume
            device (int/str): Device type used for training (int - GPU id, str- CPU)

        Returns:
            prediction (ndarray): predicted output after training
        """
        self.eval()  # PyTorch module call setting the network to evaluation mode

        if isinstance(X, np.ndarray):
            X = torch.tensor(X, requires_grad=False).type(torch.FloatTensor)
        elif isinstance(X, torch.Tensor) and not X.is_cuda:
            # .cuda() transfers the tensor from the CPU to the GPU when needed.
            # The non_blocking argument lets the caller bypass synchronization
            # when possible.
            X = X.type(torch.FloatTensor).cuda(device, non_blocking=True)

        with torch.no_grad():  # No gradients are tracked during inference
            output = self.forward(X)

        # Channel-wise argmax; .data detaches, then the indices are mapped to
        # the CPU as an ndarray and squeezed of singleton dimensions.
        _, idx = torch.max(output, 1)
        idx = idx.data.cpu().numpy()
        prediction = np.squeeze(idx)

        del X, output, idx

        return prediction

    def reset_parameters(self):
        """Parameter Initialization

        This function (re)initializes the parameters of the defined network.
        This function is a wrapper for the reset_parameters() function defined for each module.
        More information can be found here: https://discuss.pytorch.org/t/what-is-the-default-initialization-of-a-conv2d-layer-and-linear-layer/16055 + https://discuss.pytorch.org/t/how-to-reset-model-weights-to-effectively-implement-crossvalidation/53859
        An alternative (re)initialization method is described here: https://discuss.pytorch.org/t/how-to-reset-variables-values-in-nn-modules/32639
        """
        print("Initializing network parameters...")

        for _, module in self.named_children():
            for _, submodule in module.named_children():
                for _, subsubmodule in submodule.named_children():
                    # Skip layer types that are not expected to provide a
                    # reset_parameters() method.
                    if not isinstance(subsubmodule, (torch.nn.PReLU, torch.nn.Dropout3d, torch.nn.MaxPool3d)):
                        subsubmodule.reset_parameters()

        print("Initialized network parameters!")
class BrainMapperResUNet3Dshallow(nn.Module):
"""Architecture class for Residual DenseBlock BrainMapper 3D U-net.
This class contains the pytorch implementation of the U-net architecture underpinning the BrainMapper project.
Args:
parameters (dict): Contains information relevant parameters
parameters = {
'kernel_heigth': 5
'kernel_width': 5
'kernel_depth': 5
'kernel_classification': 1
'input_channels': 1
'output_channels': 64
'convolution_stride': 1
'dropout': 0.2
'pool_kernel_size': 2
'pool_stride': 2
'up_mode': 'upconv'
'number_of_classes': 1
}
Returns:
probability_map (torch.tensor): Output forward passed tensor through the U-net block
"""
def __init__(self, parameters):
    super(BrainMapperResUNet3Dshallow, self).__init__()
    # Remember the caller's channel configuration; the shared `parameters`
    # dict is mutated below and restored before __init__ returns so the
    # dict can be reused for other models.
    original_input_channels = parameters['input_channels']
    original_output_channels = parameters['output_channels']
    # Encoder: after the first block, input channels are set equal to the
    # output channels and all remaining encoder blocks keep that width.
    self.encoderBlock1 = modules.DensEncoderBlock3D(parameters)
    parameters['input_channels'] = parameters['output_channels']
    self.encoderBlock2 = modules.DensEncoderBlock3D(parameters)
    self.encoderBlock3 = modules.DensEncoderBlock3D(parameters)
    self.bottleneck = modules.DensBlock3D(parameters)
    # Decoder blocks receive doubled input channels.
    # NOTE(review): presumably because the upsampled tensor is concatenated
    # with the skip connection — confirm against DensDecoderBlock3D.
    parameters['input_channels'] = parameters['output_channels'] * 2
    self.decoderBlock1 = modules.DensDecoderBlock3D(parameters)
    self.decoderBlock2 = modules.DensDecoderBlock3D(parameters)
    self.decoderBlock3 = modules.DensDecoderBlock3D(parameters)
    # Classifier operates at the plain output width again.
    parameters['input_channels'] = parameters['output_channels']
    self.classifier = modules.DensClassifierBlock3D(parameters)
    # Restore the caller's settings.
    parameters['input_channels'] = original_input_channels
    parameters['output_channels'] = original_output_channels
def forward(self, X):
    """Forward pass for 3D U-net

    Function computing the forward pass through the shallow (3-level) 3D U-Net
    The input to the function is the dMRI map

    Args:
        X (torch.tensor): Input dMRI map, shape = (N x C x D x H x W)

    Returns:
        probability_map (torch.tensor): Output forward passed tensor through the U-net block
    """
    # Encoder: keep the second return value (Y_np*) of each block for the
    # skip connections; free each intermediate as soon as it is consumed to
    # reduce peak GPU memory.
    # Blocks are invoked via __call__ rather than .forward() so that any
    # registered hooks are honoured (as recommended by PyTorch).
    Y_encoder_1, Y_np1, _ = self.encoderBlock1(X)
    Y_encoder_2, Y_np2, _ = self.encoderBlock2(Y_encoder_1)
    del Y_encoder_1
    Y_encoder_3, Y_np3, _ = self.encoderBlock3(Y_encoder_2)
    del Y_encoder_2

    Y_bottleNeck = self.bottleneck(Y_encoder_3)
    del Y_encoder_3

    # Decoder: each block consumes the previous output plus the matching
    # skip tensor from the encoder.
    Y_decoder_1 = self.decoderBlock1(Y_bottleNeck, Y_np3)
    del Y_bottleNeck, Y_np3
    Y_decoder_2 = self.decoderBlock2(Y_decoder_1, Y_np2)
    del Y_decoder_1, Y_np2
    Y_decoder_3 = self.decoderBlock3(Y_decoder_2, Y_np1)
    del Y_decoder_2, Y_np1

    probability_map = self.classifier(Y_decoder_3)
    del Y_decoder_3

    return probability_map
def save(self, path):
    """Model Saver

    Function saving the model with all its parameters to a given path.
    The path must end with a *.model argument.

    Args:
        path (str): Path string
    """
    # Serializes the entire module object (architecture + weights).
    print(f"Saving Model... {path}")
    torch.save(self, path)
@property
def test_if_cuda(self):
    """Cuda Test

    Checks whether the model parameters currently live on a CUDA enabled GPU.

    Returns:
        bool: True if the first parameter tensor is stored on the GPU, False otherwise
    """
    first_parameter = next(self.parameters())
    return first_parameter.is_cuda
def predict(self, X, device=0):
"""Post-training Output Prediction
This function predicts the output of the of the U-net post-training
Args:
X (torch.tensor): input dMRI volume
device (int/str): Device type used for training (int - GPU id, str- CPU)
Returns:
prediction (ndarray): predicted output after training
"""
self.eval() # PyToch module setting network to evaluation mode
if type(X) is np.ndarray:
X = torch.tensor(X, requires_grad=False).type(torch.FloatTensor)
elif type(X) is torch.Tensor and not X.is_cuda:
X = X.type(torch.FloatTensor).cuda(device, non_blocking=True)
# .cuda() call transfers the densor from the CPU to the GPU if that is the case.
# Non-blocking argument lets the caller bypas synchronization when necessary
with torch.no_grad(): # Causes operations to have no gradients
output = self.forward(X)
_, idx = torch.max(output, 1)
# We retrieve the tensor held by idx (.data), and map it to a cpu as an ndarray
idx = idx.data.cpu().numpy()