# CAEL: 1D convolutional autoencoder feeding a stack of Conv1d + BiLSTM blocks (PyTorch)

import torch
import torch.nn as nn

class CAE(nn.Module):
    def __init__(self, list_of_sizes, dropout_rate=0.5):
        super(CAE, self).__init__()
        self.list_sizes = list_of_sizes
        self.dropout_rate = dropout_rate
        self.encoder = self.encoder_layer()
        self.decoder = self.decoder_layer()
        self.initialize_weights()
    
    def encoder_layer(self):
        layers = []
        for i in range(1, len(self.list_sizes)):
            layers.append(nn.Conv1d(self.list_sizes[i-1], self.list_sizes[i], kernel_size=3, stride=1, padding=1))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(self.dropout_rate))
        return nn.Sequential(*layers)

    def decoder_layer(self):
        # Mirrors the encoder; note the final layer also ends in ReLU + Dropout,
        # so reconstructions are constrained to be nonnegative.
        layers = []
        for i in range(len(self.list_sizes) - 1, 0, -1):
            layers.append(nn.ConvTranspose1d(self.list_sizes[i], self.list_sizes[i-1], kernel_size=3, stride=1, padding=1))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(self.dropout_rate))
        return nn.Sequential(*layers)

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
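
# A quick shape sketch for the autoencoder on its own (the sizes are
# illustrative assumptions, not from the original). With kernel_size=3,
# stride=1, padding=1, every layer preserves the sequence length, so only
# the channel count changes:
#
#   cae = CAE(list_of_sizes=[8, 16, 32], dropout_rate=0.1)
#   x = torch.randn(4, 8, 100)    # (batch, channels, length)
#   z = cae.encoder(x)            # (4, 32, 100) latent code
#   x_hat = cae(x)                # (4, 8, 100) reconstruction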
    
class basic_block(nn.Module):
    def __init__(self, in_channels, in_cnns, in_lstms, dropout_rate=0.5):
        super(basic_block, self).__init__()
        self.conv = nn.Conv1d(in_channels, in_cnns, kernel_size=3, stride=1, padding=1)
        self.bn = nn.BatchNorm1d(in_cnns)
        self.lstm1 = nn.LSTM(in_cnns, in_lstms, batch_first=True, bidirectional=True)
        # self.lstm2 = nn.LSTM(2 * in_lstms, in_lstms, batch_first=True, bidirectional=True)  # input size must be 2 * in_lstms since lstm1 is bidirectional
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        # x: (batch, in_channels, length)
        x = self.conv(x)
        x = self.dropout(x)
        x = self.relu(self.bn(x))   # BatchNorm1d expects channels in dim 1, so normalize before permuting
        x = x.permute(0, 2, 1)      # -> (batch, length, in_cnns) for the batch_first LSTM
        x, _ = self.lstm1(x)        # -> (batch, length, 2 * in_lstms); bidirectional doubles the width
        x = self.relu(x)
        x = self.pool(x)            # pools over the feature axis, halving 2 * in_lstms back to in_lstms
        x = x.permute(0, 2, 1)      # -> (batch, in_lstms, length) for the next block's Conv1d
        return x
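
# Shape flow through one block with illustrative sizes (in_channels=32,
# in_cnns=64, in_lstms=32, batch=4, length=100):
#
#   conv/bn:     (4, 32, 100) -> (4, 64, 100)
#   permute:     (4, 64, 100) -> (4, 100, 64)
#   lstm1 (bi):  (4, 100, 64) -> (4, 100, 64)   # 2 * in_lstms features
#   pool:        (4, 100, 64) -> (4, 100, 32)   # halves the feature axis
#   permute:     (4, 100, 32) -> (4, 32, 100)
#
# Note that MaxPool1d acts on the last axis, which after the LSTM is the
# feature axis: this is what collapses the bidirectional width back to
# in_lstms so the next block's Conv1d lines up.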

class CAEL(nn.Module):
    def __init__(self, block, in_channel, in_caes, in_cnns, in_lstms): 
        super(CAEL, self).__init__()
        self.autoencoder = CAE(list_of_sizes=[in_channel, *in_caes], dropout_rate=0.1)
        # each block emits in_lstms[i] channels (pooling halves the bidirectional
        # LSTM width), which becomes the next block's input channel count
        self.layer1 = self.layer(block, in_caes[-1], in_cnns[0], in_lstms[0], dropout_rate=0.3)
        self.layer2 = self.layer(block, in_lstms[0], in_cnns[1], in_lstms[1], dropout_rate=0.3)
        self.layer3 = self.layer(block, in_lstms[1], in_cnns[2], in_lstms[2], dropout_rate=0.3)
        self.fc = nn.Linear(in_lstms[-1], in_channel)

    def layer(self, block, in_channel, in_cnn, in_lstm, dropout_rate):
        layers = []
        layers.append(block(in_channel, in_cnn, in_lstm, dropout_rate))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = x.permute(0, 2, 1)      # (batch, length, channels) -> (batch, channels, length) for Conv1d
        x = self.autoencoder.encoder(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = x.permute(0, 2, 1)      # -> (batch, length, in_lstms[-1]) for the Linear head
        out = self.fc(x)
        return out, x
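
# A minimal end-to-end smoke test. All sizes below are illustrative
# assumptions (the original paste does not fix them); the only real
# constraints are that in_cnns and in_lstms each supply three entries,
# one per block.
if __name__ == "__main__":
    model = CAEL(
        block=basic_block,
        in_channel=8,
        in_caes=[16, 32],
        in_cnns=[64, 64, 64],
        in_lstms=[32, 32, 32],
    )
    x = torch.randn(4, 100, 8)    # (batch, length, channels)
    out, features = model(x)
    print(out.shape)       # torch.Size([4, 100, 8])
    print(features.shape)  # torch.Size([4, 100, 32])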