# Copyright (C) 2018 Elvis Yu-Jing Lin <[email protected]>
#
# This work is licensed under the MIT License. To view a copy of this license,
# visit https://opensource.org/licenses/MIT.
"""Network components."""
import torch.nn as nn
from switchable_norm import SwitchNorm1d, SwitchNorm2d


def add_normalization_1d(layers, fn, n_out):
    """Append the requested 1-D normalization layer(s) to ``layers``."""
    if fn == 'none':
        pass
    elif fn == 'batchnorm':
        layers.append(nn.BatchNorm1d(n_out))
    elif fn == 'instancenorm':
        # InstanceNorm1d expects a 3-D input, so wrap it with Unsqueeze/Squeeze.
        layers.append(Unsqueeze(-1))
        layers.append(nn.InstanceNorm1d(n_out, affine=True))
        layers.append(Squeeze(-1))
    elif fn == 'switchnorm':
        layers.append(SwitchNorm1d(n_out))
    else:
        raise Exception('Unsupported normalization: ' + str(fn))
    return layers


def add_normalization_2d(layers, fn, n_out):
    """Append the requested 2-D normalization layer to ``layers``."""
    if fn == 'none':
        pass
    elif fn == 'batchnorm':
        layers.append(nn.BatchNorm2d(n_out))
    elif fn == 'instancenorm':
        layers.append(nn.InstanceNorm2d(n_out, affine=True))
    elif fn == 'switchnorm':
        layers.append(SwitchNorm2d(n_out))
    else:
        raise Exception('Unsupported normalization: ' + str(fn))
    return layers


def add_activation(layers, fn):
    """Append the requested activation layer to ``layers``."""
    if fn == 'none':
        pass
    elif fn == 'relu':
        layers.append(nn.ReLU())
    elif fn == 'lrelu':
        layers.append(nn.LeakyReLU())
    elif fn == 'sigmoid':
        layers.append(nn.Sigmoid())
    elif fn == 'tanh':
        layers.append(nn.Tanh())
    else:
        raise Exception('Unsupported activation function: ' + str(fn))
    return layers


class Squeeze(nn.Module):
    """Squeeze the given dimension, as a module usable inside nn.Sequential."""

    def __init__(self, dim):
        super(Squeeze, self).__init__()
        self.dim = dim

    def forward(self, x):
        return x.squeeze(self.dim)


class Unsqueeze(nn.Module):
    """Unsqueeze the given dimension, as a module usable inside nn.Sequential."""

    def __init__(self, dim):
        super(Unsqueeze, self).__init__()
        self.dim = dim

    def forward(self, x):
        return x.unsqueeze(self.dim)


class LinearBlock(nn.Module):
    """Linear layer followed by optional normalization and activation."""

    def __init__(self, n_in, n_out, norm_fn='none', acti_fn='none'):
        super(LinearBlock, self).__init__()
        # The bias is redundant when a normalization layer follows the linear layer.
        layers = [nn.Linear(n_in, n_out, bias=(norm_fn == 'none'))]
        layers = add_normalization_1d(layers, norm_fn, n_out)
        layers = add_activation(layers, acti_fn)
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


class Conv2dBlock(nn.Module):
    """Conv2d layer followed by optional normalization and activation."""

    def __init__(self, n_in, n_out, kernel_size, stride=1, padding=0,
                 norm_fn='none', acti_fn='none'):
        super(Conv2dBlock, self).__init__()
        layers = [nn.Conv2d(n_in, n_out, kernel_size, stride=stride,
                            padding=padding, bias=(norm_fn == 'none'))]
        layers = add_normalization_2d(layers, norm_fn, n_out)
        layers = add_activation(layers, acti_fn)
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


class ConvTranspose2dBlock(nn.Module):
    """ConvTranspose2d layer followed by optional normalization and activation."""

    def __init__(self, n_in, n_out, kernel_size, stride=1, padding=0,
                 norm_fn='none', acti_fn='none'):
        super(ConvTranspose2dBlock, self).__init__()
        layers = [nn.ConvTranspose2d(n_in, n_out, kernel_size, stride=stride,
                                     padding=padding, bias=(norm_fn == 'none'))]
        layers = add_normalization_2d(layers, norm_fn, n_out)
        layers = add_activation(layers, acti_fn)
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)
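

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: it builds one
    # convolutional block and one linear block and runs a forward pass on
    # random data as a quick smoke test. All shapes and hyper-parameters
    # below are illustrative assumptions, not values taken from the source.
    import torch

    conv = Conv2dBlock(3, 16, kernel_size=4, stride=2, padding=1,
                       norm_fn='batchnorm', acti_fn='lrelu')
    fc = LinearBlock(16 * 16 * 16, 10, norm_fn='none', acti_fn='sigmoid')

    x = torch.randn(8, 3, 32, 32)       # batch of 8 RGB 32x32 images
    h = conv(x)                         # -> (8, 16, 16, 16)
    y = fc(h.view(h.size(0), -1))       # -> (8, 10)
    print(h.shape, y.shape)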