Changes
Page history
Update cnn_num_layer_test
authored
Mar 26, 2021
by
Kentaro Mogushi
Show whitespace changes
Inline
Side-by-side
home/cnn_num_layer_test.md
View page @
370477b1
...
@@ -6,6 +6,10 @@ I compare which model of the CNN is better using either of 3 or 4.
...
@@ -6,6 +6,10 @@ I compare which model of the CNN is better using either of 3 or 4.
-
Process of the regression
-
The score is negative on the testing set, which suggests the model generalizes poorly

```
```
0 0.710105836391449 0.5692983269691467 0.18870583176612854 0.7803471684455872 0.7499625086784363
1 0.6804407835006714 0.587370753288269 -0.1154908537864685 0.8309608101844788 0.7888878583908081
...
@@ -49,97 +53,3 @@ The score is negative in the testing set (seems not good)
...
@@ -49,97 +53,3 @@ The score is negative in the testing set (seems not good)
## Model
```
class DeepPixel(torch.nn.Module):
    """Convolutional encoder-decoder mapping a multi-channel 2-D input to one channel.

    Pipeline: a channel-preserving input conv block, a strided-conv encoder of
    ``num_layer`` stages (channels in_channels -> 8 -> 16 -> 32 [-> 64]), a
    mirrored transposed-conv decoder, and a final convolution that reduces to
    a single output channel.

    Parameters
    ----------
    in_channels : int
        Channel count of the input tensor ``(N, in_channels, H, W)``.
        NOTE(review): spatial size must survive four stride-2 halvings and the
        final ``last_kernel`` conv — presumably fixed-size spectrogram maps;
        confirm against the caller.
    kernel_size : tuple[int, int]
        Kernel shared by every down/up-sampling convolution (padding fixed at 3).
    last_kernel : tuple[int, int]
        Kernel of the final output convolution (default tuned for STFT maps).
    num_layer : int
        Encoder/decoder depth; must be 3 or 4.

    Raises
    ------
    ValueError
        If ``num_layer`` is not 3 or 4.  (The original silently fell back to a
        3-layer model for any other value, hiding misconfiguration.)
    """

    def __init__(self, in_channels, kernel_size=(7, 7), last_kernel=(22, 22), num_layer=4):
        super().__init__()
        if num_layer not in (3, 4):
            raise ValueError(f"num_layer must be 3 or 4, got {num_layer!r}")

        # Input stem: channel-preserving conv + BN + ReLU.
        self.input_conv = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels, in_channels, kernel_size=7, stride=1, padding=3),
            torch.nn.BatchNorm2d(in_channels),
            torch.nn.ReLU(),
        )

        # Encoder: stride-2 convs, doubling channels each stage.
        # Module names ('CONV_i', 'BN_i', 'TANH_i') are kept byte-identical to
        # the original so previously saved state_dicts remain loadable.
        # NOTE: the 'TANH_i' entries are actually ReLU activations — the name
        # is a historical leftover from an earlier Tanh variant.
        down_channels = [in_channels, 8, 16, 32, 64][: num_layer + 1]
        self.downsampler = torch.nn.Sequential()
        for i, (c_in, c_out) in enumerate(zip(down_channels[:-1], down_channels[1:]), start=1):
            self.downsampler.add_module(
                f'CONV_{i}',
                torch.nn.Conv2d(c_in, c_out, kernel_size=kernel_size, stride=2, padding=3))
            self.downsampler.add_module(f'BN_{i}', torch.nn.BatchNorm2d(c_out))
            self.downsampler.add_module(f'TANH_{i}', torch.nn.ReLU())

        # Decoder: mirrored transposed convs.  Stage numbering starts at 2 when
        # num_layer == 3 so names match the original ('CONVTRANS_2'..'CONVTRANS_4').
        up_channels = [64, 32, 16, 8, in_channels][-(num_layer + 1):]
        first_idx = 1 if num_layer == 4 else 2
        self.upsampler = torch.nn.Sequential()
        for i, (c_in, c_out) in enumerate(zip(up_channels[:-1], up_channels[1:]), start=first_idx):
            self.upsampler.add_module(
                f'CONVTRANS_{i}',
                torch.nn.ConvTranspose2d(c_in, c_out, kernel_size=kernel_size,
                                         stride=2, padding=3, output_padding=1))
            self.upsampler.add_module(f'BN_{i}', torch.nn.BatchNorm2d(c_out))
            self.upsampler.add_module(f'TANH_{i}', torch.nn.ReLU())

        # Final reduction to one output channel (kernel tuned for STFT inputs).
        self.output_conv = torch.nn.Conv2d(in_channels, 1, kernel_size=last_kernel,
                                           stride=1, padding=3)
        # Kept for interface compatibility; deliberately NOT applied in forward()
        # (the original had it commented out there).
        self.positive = torch.nn.ReLU()

    def forward(self, x):
        """Run encoder-decoder: ``(N, in_channels, H, W)`` -> ``(N, 1, H', W')``.

        Output spatial size depends on ``last_kernel``; with the defaults and a
        64x64 input it is 49x49.
        """
        x = self.input_conv(x)
        x = self.downsampler(x)
        x = self.upsampler(x)
        return self.output_conv(x)
```