# models.py
import torch
import torch.nn as nn
import geffnet
import torchvision.models as models

sigmoid = torch.nn.Sigmoid()


# Gradient reversal class: identity on the forward pass; on the backward pass
# the gradient is negated and scaled by 0.1
class GradReverse(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.neg() * 0.1


# Gradient reversal function
def grad_reverse(x):
    return GradReverse.apply(x)
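
# Usage note (a sketch of the typical domain-adversarial pattern, not code from
# this module): placing grad_reverse between the shared features and an
# auxiliary domain head feeds the backbone negated domain gradients, pushing it
# toward features the domain head cannot separate. Illustratively (names such
# as backbone and aux_head are placeholders):
#   feat = backbone(x)
#   domain_logits, domain_probs = aux_head(grad_reverse(feat))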

# EfficientNet feature extractor
class enetv2(nn.Module):
    def __init__(self, enet_type, load_pretrained=True):
        super(enetv2, self).__init__()
        # Pass load_pretrained through (it was previously ignored and
        # pretrained weights were always loaded)
        self.enet = geffnet.create_model(enet_type.replace('-', '_'), pretrained=load_pretrained)
        self.dropout = nn.Dropout(0.5)
        in_ch = self.enet.classifier.in_features
        self.enet.classifier = nn.Identity()

    def extract(self, x):
        x = self.enet(x)
        return x

    def forward(self, x):
        # Assigning feature representation to new variable to allow it to be pulled out and passed into auxiliary head
        feat_out = self.extract(x).squeeze(-1).squeeze(-1)
        return feat_out

# ResNet-101 feature extractor
class ResNet101(nn.Module):
    def __init__(self, pretrained=True):
        super(ResNet101, self).__init__()
        self.enet = models.resnet101(pretrained=pretrained)
        self.dropouts = nn.Dropout(0.5)
        in_ch = self.enet.fc.in_features
        self.enet.fc = nn.Identity()

    def extract(self, x):
        x = self.enet(x)
        return x

    def forward(self, x):
        # Assigning feature representation to new variable to allow it to be pulled out and passed into auxiliary head
        feat_out = self.extract(x).squeeze(-1).squeeze(-1)
        return feat_out

# ResNeXt-101 feature extractor
class ResNext101(nn.Module):
    def __init__(self, pretrained=True):
        super(ResNext101, self).__init__()
        self.enet = models.resnext101_32x8d(pretrained=pretrained)
        self.dropouts = nn.Dropout(0.5)
        in_ch = self.enet.fc.in_features
        self.enet.fc = nn.Identity()

    def extract(self, x):
        x = self.enet(x)
        return x

    def forward(self, x):
        # Assigning feature representation to new variable to allow it to be pulled out and passed into auxiliary head
        feat_out = self.extract(x).squeeze(-1).squeeze(-1)
        return feat_out

# DenseNet feature extractor
class DenseNet(nn.Module):
    def __init__(self, pretrained=True):
        super(DenseNet, self).__init__()
        self.enet = models.densenet161(pretrained=pretrained)
        self.dropouts = nn.Dropout(0.5)
        in_ch = self.enet.classifier.in_features
        self.enet.classifier = nn.Identity()

    def extract(self, x):
        x = self.enet(x)
        return x

    def forward(self, x):
        # Assigning feature representation to new variable to allow it to be pulled out and passed into auxiliary head
        feat_out = self.extract(x).squeeze(-1).squeeze(-1)
        return feat_out

# Inception-V3 feature extractor
class Inception(nn.Module):
    def __init__(self, pretrained=True):
        super(Inception, self).__init__()
        self.enet = models.inception_v3(pretrained=pretrained)
        # Disable the auxiliary classifier branch so forward returns a single tensor
        self.enet.aux_logits = False
        self.dropouts = nn.Dropout(0.5)
        in_ch = self.enet.fc.in_features
        self.enet.fc = nn.Identity()

    def extract(self, x):
        x = self.enet(x)
        return x

    def forward(self, x):
        # Assigning feature representation to new variable to allow it to be pulled out and passed into auxiliary head
        feat_out = self.extract(x).squeeze(-1).squeeze(-1)
        return feat_out

# Main classification head
class ClassificationHead(nn.Module):
    # Define model elements
    def __init__(self, out_dim, in_ch=1536):
        super(ClassificationHead, self).__init__()
        self.layer = nn.Linear(in_ch, out_dim)
        # Softmax activation (defined but not applied in forward; raw logits are returned)
        self.activation = nn.Softmax(dim=1)
        self.dropout = nn.Dropout(0.5)

    # Forward propagate input
    def forward(self, feat_out):
        # Features passed through dropout and the fully connected layer to get logits
        x = self.layer(self.dropout(feat_out))
        # Returning logits
        return x

# Auxiliary head
class AuxiliaryHead(nn.Module):
    # Define model elements
    def __init__(self, num_aux, in_ch=1536):
        super(AuxiliaryHead, self).__init__()
        # Fully connected layer
        self.layer = nn.Linear(in_ch, num_aux)
        # Softmax activation
        self.activation = nn.Softmax(dim=1)

    # Forward propagate input
    def forward(self, x_aux):
        # Features passed into fully connected layer to get logits
        x_aux = self.layer(x_aux).squeeze()
        # Probabilities obtained with the softmax activation
        px_aux = self.activation(x_aux)
        # Returning logits and probabilities as a tuple
        return x_aux, px_aux

# Deeper auxiliary head (adds a hidden fully connected layer)
class AuxiliaryHead2(nn.Module):
    # Define model elements
    def __init__(self, num_aux, in_ch=1536):
        super(AuxiliaryHead2, self).__init__()
        # Two-layer MLP with a 128-unit hidden layer
        self.layer = nn.Sequential(
            nn.Linear(in_ch, 128),
            nn.ReLU(),
            nn.Linear(128, num_aux))
        # Softmax activation
        self.activation = nn.Softmax(dim=1)

    # Forward propagate input
    def forward(self, x_aux):
        # Features passed into the MLP to get logits
        x_aux = self.layer(x_aux).squeeze()
        # Probabilities obtained with the softmax activation
        px_aux = self.activation(x_aux)
        # Returning logits and probabilities as a tuple
        return x_aux, px_aux
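
# Minimal end-to-end sketch (illustrative only, not part of the original
# pipeline): wires one backbone to both heads and exercises the gradient
# reversal path. It assumes the 'tf_efficientnet_b3' geffnet model, whose
# 1536-dim features match the heads' in_ch default; out_dim=9 and num_aux=2
# are placeholder sizes, and load_pretrained=False avoids a weight download.
if __name__ == "__main__":
    backbone = enetv2('tf_efficientnet_b3', load_pretrained=False)
    cls_head = ClassificationHead(out_dim=9)
    aux_head = AuxiliaryHead(num_aux=2)

    x = torch.randn(4, 3, 300, 300)                  # random image batch
    feat = backbone(x)                               # (4, 1536) feature vectors
    logits = cls_head(feat)                          # (4, 9) class logits
    # Auxiliary gradients are negated (and scaled by 0.1) before reaching
    # the backbone, encouraging domain-invariant features
    aux_logits, aux_probs = aux_head(grad_reverse(feat))
    print(logits.shape, aux_logits.shape, aux_probs.shape)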