@@ -19,6 +19,306 @@ import torch.nn.functional as F
# TODO: Currently, it appears that we are using constant size filters. We will need to adjust this in the network architecture, to allow it to encode/decode information!
# CycleGAN 3D Generator Autoencoder:
class ResNetEncoderBlock3D(nn.Module):
"""Parent class for a 3D convolutional block.
This class represents a generic parent class for a convolutional 3D encoder or decoder block.
The class represents a subclass/child class of nn.Module, inheriting its functionality.
Args:
parameters (dict): Contains information on kernel size, number of channels, number of filters, and if convolution is strided.
parameters = {
'kernel_heigth': 5,
'kernel_width': 5,
'kernel_depth': 5,
'input_channels': 64,
'output_channels': 64,
'convolution_stride': 1,
'dropout': 0.2,
}
Returns:
torch.tensor: Output forward passed tensor
"""
def __init__(self, parameters):
super(ResNetEncoderBlock3D,self).__init__()
# We first calculate the amount of zero padding required (http://cs231n.github.io/convolutional-networks/)