Commit 9c4903d5 authored by Andrei-Claudiu Roibu's avatar Andrei-Claudiu Roibu 🖥
Browse files

updating docstring for tractSum

parents dcb3716f d8b4134c
......@@ -117,4 +117,5 @@ dmypy.json
# Other files
.vscode/
datasets/
files.txt
\ No newline at end of file
files.txt
jobscript.sge.sh
\ No newline at end of file
......@@ -8,10 +8,11 @@ setup(
maintainer='Andrei-Claudiu Roibu',
maintainer_email='andrei-claudiu.roibu@dtc.ox.ac.uk',
install_requires=[
'pip>=20.0.2',
'matplotlib>=3.1.1',
'nibabel>=2.5.1',
'numpy>=1.17.3',
'pandas>=0.25.3',
'pip',
'matplotlib',
'nibabel',
'numpy',
'pandas',
'torch',
],
)
"""Data Processing Functions
Description:
-------------
This folder contains several functions which, either on their own or included in larger pieces of software, perform processing tasks on the data.
Usage
-------------
To use content from this folder, import the functions and instantiate them as you wish to use them:
from utils.data_utils import function_name
"""
import os
from glob import glob
import numpy as np
......@@ -19,19 +34,16 @@ def dirReader(folder_location):
None
"""
out_file = open("files2.txt", 'w')
out_file = open("files.txt", 'w')
subDirectoryList = []
for directory in os.listdir(folder_location):
if os.path.isdir(os.path.join(folder_location, directory)):
filename = folder_location+directory
print(filename)
if os.access(filename, os.R_OK):
string = directory+'\n'
out_file.write(string)
subDirectoryList.append(directory)
else:
pass
return subDirectoryList
......@@ -43,7 +55,7 @@ def tractSum(folder_path):
This function also outputs the summed tract map as a Nifti (.nii.gz) file.
Args:
folder_path (string): A string containing the address of the required directory.
folder_location (str): A string containing the address of the required directory.
Returns:
None
......@@ -53,37 +65,52 @@ def tractSum(folder_path):
"""
tractMapName = 'tractsNorm.nii.gz'
tractMapName = 'tracts/tractsNorm.nii.gz'
sum_flag = False
subDirectoryList = dirReader(folder_path)
for directory in os.listdir(folder_path):
if os.path.isdir(os.path.join(folder_location, directory)):
viableSubDirectories = len(subDirectoryList)
counter = 0
tractedMapPath = folder_path + directory + '/' + tractMapName
tractedMapImg = nib.load(tractedMapPath)
tractedMap = tractedMapImg.get_fdata()
if not os.path.exists('/well/win/users/hsv459/functionmapper/datasets/dMRI'):
if not os.path.exists('/well/win/users/hsv459/functionmapper/datasets'):
os.mkdir('/well/win/users/hsv459/functionmapper/datasets')
os.mkdir('/well/win/users/hsv459/functionmapper/datasets/dMRI')
# the affine array stores the relationship between voxel coordinates in the image data array and coordinates in the reference space
for subDirectory in subDirectoryList:
tractedMapsPath = os.path.join(folder_location, str(subDirectory), 'dMRI/autoptx_preproc/tracts/')
tractedMapAffine = tractedMapImg.affine
sum_flag = False # This is a flat showing us if this is the first tracted to be summed
print("Summing the tract number: {}/{}".format(counter, viableSubDirectories))
for tract in os.listdir(tractedMapsPath):
if os.path.isdir(os.path.join(tractedMapsPath, tract)):
tractedMapPath = os.path.join(tractedMapsPath, tract, tractMapName)
tractedMapImg = nib.load(tractedMapPath)
tractedMap = tractedMapImg.get_fdata()
# the affine array stores the relationship between voxel coordinates in the image data array and coordinates in the reference space
tractedMapAffine = tractedMapImg.affine
if sum_flag == False:
tractedMapSum = np.copy(tractedMap)
else:
tractedMapSum = np.sum(tractedMapSum, tractedMap)
if sum_flag == False:
tractedMapSum = np.copy(tractedMap)
else:
tractedMapSum = np.sum(tractedMapSum, tractedMap)
else:
pass
tractedMapSumPath = '/well/win/users/hsv459/functionmapper/datasets/dMRI'
tractsSumName = str(subDirectory) + ".nii.gz"
tractedMapSumImg = nib.Nifti1Image(tractedMapSum, tractedMapAffine)
nib.save(tractedMapSumImg, os.path.join(tractedMapSumPath, tractsSumName))
tractedMapSumImg = nib.Nifti1Image(tractedMapSum, tractedMapAffine)
nib.save(tractedMapSumImg, os.path.join(folder_path, "tractsSum.nii.gz"))
counter +=1
return None
if __name__ == "__main__":
# dirReader("/home/andrei/directory_readear/")
folder_location = '/home/andrei/functionmapper/datasets/dMRI/23425368/'
folder_location = '/well/win-biobank/projects/imaging/data/data3/subjectsAll/'
tractSum(folder_location)
\ No newline at end of file
"""Deep Learning Modules
Description:
-------------
This folder contains several the building blocks for the translation neural network.
Usage
-------------
To use the modules, import the packages and instantiate any module/block class as you wish:
from utils.modules import modules as module_names
block = module_name.ConvolutionalBlock(parameters)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
# TODO: Currently, it appears that we are using constant size filters. We will need to adjust this in the network architecture, to allow it to encode/decode information!
class ConvolutionalBlock(nn.Module):
    """Parent class for a convolutional block.

    Generic base class for a convolutional encoder or decoder block.
    It is a subclass/child class of nn.Module, inheriting its functionality.
    The block applies: Convolution -> Non-linear Activation -> Batch Normalisation.
    A Dropout2d module is instantiated here when requested, but it is applied
    by the subclasses, not by this class's forward pass.

    Args:
        parameters (dict): Contains information on kernel size, number of channels, number of filters, and if convolution is strided.
            parameters = {
                'kernel_heigth': 5
                'kernel_width': 5
                'input_channels': 64
                'output_channels': 64
                'convolution_stride': 1
                'dropout': 0.2
            }

    Returns:
        torch.tensor: Output forward passed tensor

    Raises:
        None
    """

    def __init__(self, parameters):
        super(ConvolutionalBlock, self).__init__()

        # Zero padding that keeps the spatial size constant for stride 1
        # (http://cs231n.github.io/convolutional-networks/)
        pad_height = (parameters['kernel_heigth'] - 1) // 2
        pad_width = (parameters['kernel_width'] - 1) // 2

        self.output_channels = parameters['output_channels']

        self.convolutional_layer = nn.Conv2d(
            in_channels=parameters['input_channels'],
            out_channels=parameters['output_channels'],
            kernel_size=(parameters['kernel_heigth'], parameters['kernel_width']),
            stride=parameters['convolution_stride'],
            padding=(pad_height, pad_width),
        )

        self.activation = nn.PReLU()
        # Other activation functions which might be interesting to test:
        # nn.SELU(), nn.ELU(), nn.ReLU()
        # More reading: https://arxiv.org/abs/1706.02515 ; https://mlfromscratch.com/activation-functions-explained/#/

        self.batchnormalization = nn.BatchNorm2d(num_features=parameters['output_channels'])

        # Dropout is only instantiated when a positive rate is configured;
        # subclasses check `dropout_needed` before touching `self.dropout`.
        self.dropout_needed = parameters['dropout'] > 0
        if self.dropout_needed:
            self.dropout = nn.Dropout2d(parameters['dropout'])

    def forward(self, X):
        """Forward pass

        Function computing the forward pass through the convolutional layer.
        The input to the function is a torch tensor of shape N (batch size) x C (number of channels) x H (input heigth) x W (input width)

        Args:
            X (torch.tensor): Input tensor, shape = (N x C x H x W)

        Returns:
            torch.tensor: Output forward passed tensor

        Raises:
            None
        """
        convolved = self.convolutional_layer(X)
        return self.batchnormalization(self.activation(convolved))
class EncoderBlock(ConvolutionalBlock):
    """Forward encoder path block for a U-net.

    This class creates a simple encoder block following the architecture:
    Convolution -> Non-linear Activation -> Batch Normalisation -> MaxPool

    Args:
        parameters (dict): Contains information relevant parameters
            parameters = {
                'kernel_heigth': 5
                'kernel_width': 5
                'input_channels': 64
                'output_channels': 64
                'convolution_stride': 1
                'dropout': 0.2
                'pool_kernel_size': 2
                'pool_stride': 2
            }

    Returns:
        Y (torch.tensor): Output forward passed tensor through the encoder block, with maxpool
        Y_np (torch.tensor): Output forward passed tensor through the encoder block, with no pooling
        pool_indices (torch.tensor): Indices for unpooling

    Raises:
        None
    """

    def __init__(self, parameters):
        super(EncoderBlock, self).__init__(parameters)
        # return_indices=True hands back the argmax locations so a decoder
        # MaxUnpool2d can later invert this pooling step.
        self.maxpool = nn.MaxPool2d(
            kernel_size=parameters['pool_kernel_size'],
            stride=parameters['pool_stride'],
            return_indices=True,
        )

    def forward(self, X):
        """Forward pass for U-net encoder block

        Function computing the forward pass through the encoder block.
        The input to the function is a torch tensor of shape N (batch size) x C (number of channels) x H (input heigth) x W (input width)

        Args:
            X (torch.tensor): Input tensor, shape = (N x C x H x W)

        Returns:
            Y (torch.tensor): Output forward passed tensor through the encoder block, with maxpool
            Y_np (torch.tensor): Output forward passed tensor through the encoder block, with no pooling
            pool_indices (torch.tensor): Indices for unpooling

        Raises:
            None
        """
        # Conv -> PReLU -> BatchNorm from the parent, then optional dropout.
        unpooled = super(EncoderBlock, self).forward(X)
        if self.dropout_needed:
            unpooled = self.dropout(unpooled)
        # The pre-pooling activation is also returned for skip connections.
        pooled, pool_indices = self.maxpool(unpooled)
        return pooled, unpooled, pool_indices
class DecoderBlock(ConvolutionalBlock):
    """Forward decoder path block for a U-net.

    This class creates a simple decoder block following the architecture:
    Strided Convolution (or) MaxUnpool -> Convolution -> Non-linear Activation -> Batch Normalisation

    Args:
        parameters (dict): Contains information relevant parameters
            parameters = {
                'kernel_heigth': 5
                'kernel_width': 5
                'input_channels': 64
                'output_channels': 64
                'convolution_stride': 1
                'dropout': 0.2
                'pool_kernel_size': 2
                'pool_stride': 2
                'up_mode': 'upconv'
            }

    Returns:
        Y (torch.tensor): Output forward passed tensor through the decoder block

    Raises:
        ValueError: If 'up_mode' is not one of 'upconv', 'upsample', 'unpool'.
    """

    def __init__(self, parameters):
        super(DecoderBlock, self).__init__(parameters)
        # Remember the mode so forward() knows whether self.up needs pool indices.
        self.up_mode = parameters['up_mode']

        if self.up_mode == 'upconv':
            # Learned upsampling via transposed convolution.
            self.up = nn.ConvTranspose2d(
                in_channels=parameters['input_channels'],
                out_channels=parameters['output_channels'],
                kernel_size=parameters['pool_kernel_size'],
                stride=parameters['pool_stride'],
            )
        elif self.up_mode == 'upsample':
            # Fixed bilinear upsampling followed by a 1x1 conv to mix channels.
            self.up = nn.Sequential(
                nn.Upsample(
                    mode='bilinear',
                    scale_factor=2,
                ),
                nn.Conv2d(
                    in_channels=parameters['input_channels'],
                    out_channels=parameters['output_channels'],
                    kernel_size=1,
                )
            )
        elif self.up_mode == 'unpool':
            # Inverts the encoder's MaxPool2d using the recorded indices.
            self.up = nn.MaxUnpool2d(
                kernel_size=parameters['pool_kernel_size'],
                stride=parameters['pool_stride']
            )
        else:
            # BUG FIX: the original silently left self.up undefined for an
            # unknown mode, deferring the failure to the first forward pass.
            raise ValueError("up_mode must be 'upconv', 'upsample' or 'unpool', got {!r}".format(self.up_mode))

    def forward(self, X, Y_encoder=None, pool_indices=None):
        """Forward pass for U-net decoder block

        Function computing the forward pass through the decoder block.
        The input to the function is a torch tensor of shape N (batch size) x C (number of channels) x H (input heigth) x W (input width).
        A second input is a tensor for the skip connection, of shape (N x C x H x W); that defaults to None.
        The function also takes the previous pool indices, for the unpooling operation; they also default to None.

        Args:
            X (torch.tensor): Input tensor, shape = (N x C x H x W)
            Y_encoder (torch.tensor): Skip-connection tensor, shape = (N x C x H x W)
            pool_indices (torch.tensor): Indices for unpooling

        Returns:
            Y (torch.tensor): Output forward passed tensor through the decoder block

        Raises:
            None
        """
        # BUG FIX: the original always called self.up(X, pool_indices), which
        # crashes for 'upconv' and 'upsample' — only nn.MaxUnpool2d accepts
        # pool indices as a second argument.
        if self.up_mode == 'unpool':
            upsampling = self.up(X, pool_indices)
        else:
            upsampling = self.up(X)

        if Y_encoder is None:
            concatenation = upsampling
        else:
            # Skip connection: concatenate along the channel dimension.
            concatenation = torch.cat((Y_encoder, upsampling), dim=1)

        Y = super(DecoderBlock, self).forward(concatenation)

        if self.dropout_needed:
            # BUG FIX: the original called self.dropout_needed(Y) — a bool is
            # not callable; the dropout module itself must be applied.
            Y = self.dropout(Y)

        return Y
class ClassifierBlock(ConvolutionalBlock):
    """Classifier block for a U-net.

    This class creates a simple classifier block: a single convolution mapping
    the incoming feature channels to one channel per output class.

    Args:
        parameters (dict): Contains information relevant parameters
            parameters = {
                'kernel_heigth': 5
                'kernel_width': 5
                'kernel_classification': 1
                'input_channels': 1
                'output_channels': 1
                'convolution_stride': 1
                'dropout': 0.2
                'pool_kernel_size': 2
                'pool_stride': 2
                'up_mode': 'upconv'
                'number_of_classes': 1
            }

    Returns:
        Y (torch.tensor): Output forward passed tensor through the decoder block

    Raises:
        None
    """

    def __init__(self, parameters):
        # BUG FIX: the original called super().__init__() with no arguments,
        # but ConvolutionalBlock.__init__ requires the `parameters` dict, so
        # constructing a ClassifierBlock raised a TypeError.
        super(ClassifierBlock, self).__init__(parameters)
        # Override the parent's convolution with the classification layer,
        # mapping input channels to one channel per class.
        self.convolutional_layer = nn.Conv2d(
            in_channels=parameters['input_channels'],
            out_channels=parameters['number_of_classes'],
            kernel_size=parameters['kernel_classification'],
            stride=parameters['convolution_stride'],
        )

        # TODO: Might be worth looking at GANs for image generation, and adding padding

    def forward(self, X):
        """Forward pass for U-net classifier block

        Function computing the forward pass through the classifier block.
        The input to the function is a torch tensor of shape N (batch size) x C (number of channels) x H (input heigth) x W (input width).

        Args:
            X (torch.tensor): Input tensor, shape = (N x C x H x W)

        Returns:
            logits (torch.tensor): Output logits from forward pass tensor through the classifier block

        Raises:
            None
        """
        logits = self.convolutional_layer(X)

        # TODO: Currently, this has no activation function. Might be worth considering adding a tanh activation function, similar to GANs
        # For reference : https://machinelearningmastery.com/how-to-implement-pix2pix-gan-models-from-scratch-with-keras/
        # For reference 2: https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html

        return logits
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment