# DBPNM.py
from Models import CONV, UpProjection, DownProjection, UpProjectionWithoutEF, DownProjectionWithoutEF

# The DBPN-M model. It has no densely connected BP layers, making it lightweight but not the best-performing variant.
# By default, all layers use the PReLU activation function, following the authors' work.
class DBPNM:
    def __init__(self, scale_factor=2, bias=True, bias_init='zeros'):
        kernel_size = 0
        stride = 0
        padding = 0
        if scale_factor == 2:
            # default scaling factor of 2
            kernel_size = 6
            stride = 2
            padding = 2
        elif scale_factor == 4:
            kernel_size = 8
            stride = 4
            padding = 2
        elif scale_factor == 8:
            kernel_size = 12
            stride = 8
            padding = 2
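        # Assuming the up-projection layers realize upsampling with a transposed
        # convolution (as in DBPN), each (kernel_size, stride, padding) triple above
        # yields an exact integer scale factor, since for a transposed convolution
        #     out = (in - 1) * stride - 2 * padding + kernel_size
        # e.g. scale 2: (H - 1) * 2 - 2 * 2 + 6  = 2 * H
        #      scale 4: (H - 1) * 4 - 2 * 2 + 8  = 4 * H
        #      scale 8: (H - 1) * 8 - 2 * 2 + 12 = 8 * H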
        # Feature extraction stage
        self.f0 = CONV(128, 3, 1, 1, bias, bias_init, True)
        self.f1 = CONV(32, 1, 1, 0, bias, bias_init, True)
        # Back-projection stage: DBPN-M has a total of 4 BP stages; the last stage only has an up-projection
        self.up1 = UpProjection(32, kernel_size, stride, padding, bias, bias_init)
        self.down1 = DownProjection(32, kernel_size, stride, padding, bias, bias_init)
        self.up2 = UpProjection(32, kernel_size, stride, padding, bias, bias_init)
        self.down2 = DownProjection(32, kernel_size, stride, padding, bias, bias_init)
        self.up3 = UpProjection(32, kernel_size, stride, padding, bias, bias_init)
        self.down3 = DownProjection(32, kernel_size, stride, padding, bias, bias_init)
        self.up4 = UpProjection(32, kernel_size, stride, padding, bias, bias_init)
        # Reconstruction
        self.reconstruction = CONV(3, 1, 1, 0, bias, bias_init, False)

    def __call__(self, x):
        # Feature extraction
        x = self.f0(x)
        x = self.f1(x)
        # BP stage
        h = self.up1(x)
        l = self.down1(h)
        h = self.up2(l)
        l = self.down2(h)
        h = self.up3(l)
        l = self.down3(h)
        h = self.up4(l)
        # Reconstruction stage
        x = self.reconstruction(h)
        return x
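
# Usage sketch (illustrative): how DBPNM might be instantiated and applied. This assumes
# the CONV / UpProjection / DownProjection wrappers from Models operate on batched image
# tensors of the underlying framework; `lr_batch` is a hypothetical low-resolution input
# and is not defined in this file.
#
#     model = DBPNM(scale_factor=4)   # 8x8 projection kernels, stride 4, padding 2
#     sr_batch = model(lr_batch)      # 3-channel output at 4x the input resolution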

# For comparison only. In general, this model would not be used in a super-resolution application.
class DBPNM_WithoutEF:
    def __init__(self, scale_factor, bias=True, bias_init='zeros'):
        kernel_size = 0
        stride = 0
        padding = 0
        if scale_factor == 2:
            # default scaling factor of 2
            kernel_size = 6
            stride = 2
            padding = 2
        elif scale_factor == 4:
            kernel_size = 8
            stride = 4
            padding = 2
        elif scale_factor == 8:
            kernel_size = 12
            stride = 8
            padding = 2
        # Feature extraction stage
        self.f0 = CONV(128, 3, 1, 1, bias, bias_init, True)
        self.f1 = CONV(32, 1, 1, 0, bias, bias_init, True)
        # Back-projection stage: DBPN-M has a total of 4 BP stages; the last stage only has an up-projection
        self.up1 = UpProjectionWithoutEF(32, kernel_size, stride, padding, bias, bias_init)
        self.down1 = DownProjectionWithoutEF(32, kernel_size, stride, padding, bias, bias_init)
        self.up2 = UpProjectionWithoutEF(32, kernel_size, stride, padding, bias, bias_init)
        self.down2 = DownProjectionWithoutEF(32, kernel_size, stride, padding, bias, bias_init)
        self.up3 = UpProjectionWithoutEF(32, kernel_size, stride, padding, bias, bias_init)
        self.down3 = DownProjectionWithoutEF(32, kernel_size, stride, padding, bias, bias_init)
        self.up4 = UpProjectionWithoutEF(32, kernel_size, stride, padding, bias, bias_init)
        # Reconstruction
        self.reconstruction = CONV(3, 1, 1, 0, bias, bias_init, False)

    def __call__(self, x):
        # Feature extraction
        x = self.f0(x)
        x = self.f1(x)
        # BP stage
        h = self.up1(x)
        l = self.down1(h)
        h = self.up2(l)
        l = self.down2(h)
        h = self.up3(l)
        l = self.down3(h)
        h = self.up4(l)
        # Reconstruction stage
        x = self.reconstruction(h)
        return x
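
# Comparison sketch (illustrative): "WithoutEF" presumably refers to projection units that
# omit the error-feedback path of DBPN; that reading, and `lr_batch`, are assumptions made
# only for this example.
#
#     baseline = DBPNM(scale_factor=2)
#     ablation = DBPNM_WithoutEF(scale_factor=2)
#     sr_full, sr_no_ef = baseline(lr_batch), ablation(lr_batch)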