4 files changed: +15 −6 lines (file tree includes tests/test_models/test_necks)

@@ -22,7 +22,6 @@ class MixFFN(BaseModule):
     The differences between MixFFN & FFN:
         1. Use 1X1 Conv to replace Linear layer.
         2. Introduce 3X3 Conv to encode positional information.
-
     Args:
         embed_dims (int): The feature dimension. Same as
             `MultiheadAttention`. Defaults: 256.
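
The docstring above summarizes how MixFFN departs from a plain FFN. A minimal sketch of that idea (not the repo's actual implementation; names and defaults here are illustrative):

import torch.nn as nn

class MixFFNSketch(nn.Module):
    """1x1 convs stand in for the FFN's Linear layers; a 3x3 depth-wise
    conv injects positional information, as described above."""

    def __init__(self, embed_dims=256, feedforward_channels=1024):
        super().__init__()
        self.fc1 = nn.Conv2d(embed_dims, feedforward_channels, kernel_size=1)
        self.pos_conv = nn.Conv2d(
            feedforward_channels, feedforward_channels, kernel_size=3,
            padding=1, groups=feedforward_channels)  # depth-wise 3x3
        self.act = nn.GELU()
        self.fc2 = nn.Conv2d(feedforward_channels, embed_dims, kernel_size=1)

    def forward(self, x):  # x: (N, C, H, W)
        return self.fc2(self.act(self.pos_conv(self.fc1(x))))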
@@ -94,7 +93,6 @@ class EfficientMultiheadAttention(MultiheadAttention):

     This module is modified from MultiheadAttention which is a module from
     mmcv.cnn.bricks.transformer.
-
     Args:
         embed_dims (int): The embedding dimension.
         num_heads (int): Parallel attention heads.
@@ -291,7 +289,6 @@ class MixVisionTransformer(BaseModule):
     This backbone is the implementation of `SegFormer: Simple and
     Efficient Design for Semantic Segmentation with
     Transformers <https://arxiv.org/abs/2105.15203>`_.
-
     Args:
         in_channels (int): Number of input channels. Default: 3.
         embed_dims (int): Embedding dimension. Default: 768.
@@ -101,7 +101,7 @@ def forward(self, inputs):
         used_backbone_levels = len(laterals)
         for i in range(used_backbone_levels - 1, 0, -1):
             prev_shape = laterals[i - 1].shape[2:]
-            laterals[i - 1] += resize(
+            laterals[i - 1] = laterals[i - 1] + resize(
                 laterals[i],
                 size=prev_shape,
                 mode='bilinear',
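
The hunk above replaces the in-place `+=` with an out-of-place addition. The PR description is not shown here, but a plausible motivation (an assumption, not stated in the diff) is that in-place updates of a tensor autograd has saved for backward fail at backward time. A minimal sketch of that failure mode:

import torch

a = torch.randn(3, requires_grad=True)
b = a.exp()            # autograd saves exp's output for its gradient
b += 1                 # in-place update bumps b's version counter
try:
    b.sum().backward()
except RuntimeError as err:
    # "... has been modified by an inplace operation"
    print(err)

The out-of-place form `laterals[i - 1] = laterals[i - 1] + resize(...)` allocates a fresh tensor instead, leaving any saved tensor untouched.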
@@ -175,10 +175,11 @@ def forward(self, inputs):
             # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
             # it cannot co-exist with `size` in `F.interpolate`.
             if 'scale_factor' in self.upsample_cfg:
-                laterals[i - 1] += resize(laterals[i], **self.upsample_cfg)
+                laterals[i - 1] = laterals[i - 1] + resize(
+                    laterals[i], **self.upsample_cfg)
             else:
                 prev_shape = laterals[i - 1].shape[2:]
-                laterals[i - 1] += resize(
+                laterals[i - 1] = laterals[i - 1] + resize(
                     laterals[i], size=prev_shape, **self.upsample_cfg)

         # build outputs
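
The context comment notes that `size` and `scale_factor` are mutually exclusive in `F.interpolate` (which mmseg's `resize` wraps). A quick illustration of that constraint, with shapes chosen to match the test below:

import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 14, 14)
up1 = F.interpolate(x, scale_factor=2.0, mode='nearest')  # -> (1, 64, 28, 28)
up2 = F.interpolate(x, size=(28, 28), mode='nearest')     # same output shape
assert up1.shape == up2.shape
# Passing both arguments raises:
# ValueError: only one of size or scale_factor should be defined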
@@ -17,3 +17,14 @@ def test_fpn():
     assert outputs[1].shape == torch.Size([1, 64, 28, 28])
     assert outputs[2].shape == torch.Size([1, 64, 14, 14])
     assert outputs[3].shape == torch.Size([1, 64, 7, 7])
+
+    fpn = FPN(
+        in_channels,
+        64,
+        len(in_channels),
+        upsample_cfg=dict(mode='nearest', scale_factor=2.0))
+    outputs = fpn(inputs)
+    assert outputs[0].shape == torch.Size([1, 64, 56, 56])
+    assert outputs[1].shape == torch.Size([1, 64, 28, 28])
+    assert outputs[2].shape == torch.Size([1, 64, 14, 14])
+    assert outputs[3].shape == torch.Size([1, 64, 7, 7])
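
The added case exercises the `scale_factor` branch of the FPN forward changed above. The fixture (`in_channels`, `inputs`) is defined above the hunk and not shown; a hedged reconstruction consistent with the assertions (channel counts are assumed, spatial sizes follow from the asserted 56/28/14/7 pyramid):

import torch
from mmseg.models.necks import FPN

in_channels = [64, 128, 256, 512]  # assumed; must only match the inputs below
inputs = [
    torch.randn(1, c, 56 // 2**i, 56 // 2**i)
    for i, c in enumerate(in_channels)
]
fpn = FPN(in_channels, 64, len(in_channels))
outputs = fpn(inputs)
assert outputs[0].shape == torch.Size([1, 64, 56, 56])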