Commit e1f84662 authored by Andrei Roibu's avatar Andrei Roibu
Browse files

created a generic convolutional block

parent 2ca55334
"""Deep Learning Modules
Description:
-------------
This folder contains several of the building blocks for the translation neural network.
Usage
-------------
To use the modules, import the packages and instantiate any module/block class as you wish:
from utils.modules import modules as module_names
block = module_names.ConvolutionalBlock(parameters)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvolutionalBlock(nn.Module):
    """Generic convolutional block: Conv2d -> PReLU -> BatchNorm2d -> optional Dropout2d.

    This class represents a generic parent class for a convolutional encoder or
    decoder block. It is a subclass of nn.Module, inheriting its functionality.

    Args:
        parameters (dict): Contains information on kernel size, number of channels,
            number of filters, and if convolution is strided.
            parameters = {
                'kernel_heigth': 5,
                'kernel_width': 5,
                'input_channels': 64,
                'output_channels': 64,
                'convolution_stride': 1,
                'dropout': 0.2,  # probability; <= 0 disables dropout
            }
            NOTE: the key 'kernel_heigth' (misspelled) is the spelling used by
            callers of this class and is kept for compatibility.

    Returns:
        G (torch.tensor): Output forward passed tensor

    Raises:
        None
    """
    def __init__(self, parameters):
        super(ConvolutionalBlock, self).__init__()
        # Zero padding for "same"-style output at stride 1: pad = (k - 1) / 2
        # (http://cs231n.github.io/convolutional-networks/). Assumes odd kernel
        # sizes for an exactly symmetric pad.
        padding_heigth = int((parameters['kernel_heigth'] - 1) / 2)
        padding_width = int((parameters['kernel_width'] - 1) / 2)
        self.output_channels = parameters['output_channels']
        self.convolutional_layer = nn.Conv2d(
            in_channels=parameters['input_channels'],
            out_channels=parameters['output_channels'],
            kernel_size=(parameters['kernel_heigth'], parameters['kernel_width']),
            stride=parameters['convolution_stride'],
            padding=(padding_heigth, padding_width)
        )
        self.activation = nn.PReLU()
        # Other activation functions which might be interesting to test:
        # nn.SELU(), nn.ELU(), nn.ReLU()
        # More reading: https://arxiv.org/abs/1706.02515 ; https://mlfromscratch.com/activation-functions-explained/#/
        self.batchnormalization = nn.BatchNorm2d(num_features=parameters['output_channels'])
        if parameters['dropout'] > 0:
            self.dropout_needed = True
            self.dropout = nn.Dropout2d(parameters['dropout'])
        else:
            self.dropout_needed = False

    def forward(self, X):
        """Forward pass

        Function computing the forward pass through the convolutional layer.
        The input is a torch tensor of shape N (batch size) x C (number of
        channels) x H (input heigth) x W (input width).

        Args:
            X (torch.tensor): Input tensor, shape = (N x C x H x W)

        Returns:
            torch.tensor: Output forward passed tensor

        Raises:
            None
        """
        G = self.batchnormalization(self.activation(self.convolutional_layer(X)))
        # FIX: the dropout layer was constructed in __init__ but never applied;
        # apply it here (a no-op in eval() mode, as usual for nn.Dropout2d).
        if self.dropout_needed:
            G = self.dropout(G)
        return G
class EncoderBlock(ConvolutionalBlock):
    """Encoder block (placeholder).

    Stub subclass of ConvolutionalBlock; both methods are intentionally
    empty and do nothing yet. NOTE(review): __init__ does not call
    super().__init__(), so instances are not usable as nn.Modules until
    this is implemented.
    """
    def __init__ (self, parameters):
        # TODO: implement; must call super().__init__(parameters).
        pass
    def forward(self, X):
        # TODO: implement the encoder forward pass (currently returns None).
        pass
class DecoderBlock(ConvolutionalBlock):
    """Decoder block (placeholder).

    Stub subclass of ConvolutionalBlock; both methods are intentionally
    empty and do nothing yet. NOTE(review): __init__ does not call
    super().__init__(), so instances are not usable as nn.Modules until
    this is implemented.
    """
    def __init__ (self, parameters):
        # TODO: implement; must call super().__init__(parameters).
        pass
    def forward(self, X):
        # TODO: implement the decoder forward pass (currently returns None).
        pass
class ClassifierBlock(ConvolutionalBlock):
    """Classifier block (placeholder).

    Stub subclass of ConvolutionalBlock; both methods are intentionally
    empty and do nothing yet. NOTE(review): __init__ does not call
    super().__init__(), so instances are not usable as nn.Modules until
    this is implemented.
    """
    def __init__(self, parameters):
        # TODO: implement; must call super().__init__(parameters).
        pass
    def forward(self, X):
        # TODO: implement the classifier forward pass (currently returns None).
        pass
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment