
Commit a672c05

[Fix] Remove the inplace operation in uper_head and fpn_neck (#1103)

* [Fix] Remove the inplace operation in uper_head
* Remove the inplace operation in fpn neck
* Fix conflict
* Increase the coverage
1 parent f8ed148 commit a672c05

File tree: 4 files changed (+15, -6 lines)
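Motivation: `laterals[i - 1] += resize(...)` updates the lateral feature map in place. If that tensor has been saved by autograd for the backward pass, PyTorch detects the modification and raises a RuntimeError when gradients are computed. A minimal sketch of that failure mode, not taken from this repo, together with the out-of-place form the commit switches to:

```python
import torch

# Minimal sketch (not from this repo) of the failure mode the commit fixes:
# a tensor that autograd still needs must not be modified in place.
x = torch.randn(3, requires_grad=True)
y = torch.sigmoid(x)   # sigmoid's backward reads its own output `y`
y += 1.0               # in-place add bumps y's version counter
try:
    y.sum().backward()
except RuntimeError as err:
    # "one of the variables needed for gradient computation has been
    # modified by an inplace operation"
    print(err)

# Out-of-place form, analogous to `laterals[i - 1] = laterals[i - 1] + resize(...)`
x2 = torch.randn(3, requires_grad=True)
y2 = torch.sigmoid(x2)
y2 = y2 + 1.0          # new tensor; the saved sigmoid output stays intact
y2.sum().backward()    # succeeds
```

The out-of-place add allocates a fresh tensor, which costs a little extra memory but leaves every saved activation untouched.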

Diff for: mmseg/models/backbones/mit.py

-3 lines

```diff
@@ -22,7 +22,6 @@ class MixFFN(BaseModule):
     The differences between MixFFN & FFN:
         1. Use 1X1 Conv to replace Linear layer.
         2. Introduce 3X3 Conv to encode positional information.
-
     Args:
         embed_dims (int): The feature dimension. Same as
             `MultiheadAttention`. Defaults: 256.
@@ -94,7 +93,6 @@ class EfficientMultiheadAttention(MultiheadAttention):
 
     This module is modified from MultiheadAttention which is a module from
     mmcv.cnn.bricks.transformer.
-
     Args:
         embed_dims (int): The embedding dimension.
         num_heads (int): Parallel attention heads.
@@ -291,7 +289,6 @@ class MixVisionTransformer(BaseModule):
     This backbone is the implementation of `SegFormer: Simple and
     Efficient Design for Semantic Segmentation with
     Transformers <https://arxiv.org/abs/2105.15203>`_.
-
     Args:
         in_channels (int): Number of input channels. Default: 3.
         embed_dims (int): Embedding dimension. Default: 768.
```

Diff for: mmseg/models/decode_heads/uper_head.py

+1, -1 lines

```diff
@@ -101,7 +101,7 @@ def forward(self, inputs):
         used_backbone_levels = len(laterals)
         for i in range(used_backbone_levels - 1, 0, -1):
             prev_shape = laterals[i - 1].shape[2:]
-            laterals[i - 1] += resize(
+            laterals[i - 1] = laterals[i - 1] + resize(
                 laterals[i],
                 size=prev_shape,
                 mode='bilinear',
```
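For context, a self-contained sketch of the top-down fusion loop after this change, with `torch.nn.functional.interpolate` standing in for mmseg's `resize` wrapper (an assumption made only so the snippet runs on its own):

```python
import torch
import torch.nn.functional as F

# Standalone sketch of the top-down fusion after the fix; F.interpolate is
# used here as a stand-in for mmseg's `resize` wrapper.
laterals = [torch.randn(1, 64, s, s) for s in (56, 28, 14, 7)]

used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
    prev_shape = laterals[i - 1].shape[2:]
    # Out-of-place add: the list entry is rebound to a fresh tensor, so the
    # original lateral tensor is never mutated.
    laterals[i - 1] = laterals[i - 1] + F.interpolate(
        laterals[i], size=prev_shape, mode='bilinear', align_corners=False)

print([tuple(l.shape) for l in laterals])
```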

Diff for: mmseg/models/necks/fpn.py

+3, -2 lines

```diff
@@ -175,10 +175,11 @@ def forward(self, inputs):
             # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
             #  it cannot co-exist with `size` in `F.interpolate`.
             if 'scale_factor' in self.upsample_cfg:
-                laterals[i - 1] += resize(laterals[i], **self.upsample_cfg)
+                laterals[i - 1] = laterals[i - 1] + resize(
+                    laterals[i], **self.upsample_cfg)
             else:
                 prev_shape = laterals[i - 1].shape[2:]
-                laterals[i - 1] += resize(
+                laterals[i - 1] = laterals[i - 1] + resize(
                     laterals[i], size=prev_shape, **self.upsample_cfg)
 
         # build outputs
```
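The two branches exist because `size` and `scale_factor` cannot both be passed to the underlying `F.interpolate`, as the comment in the hunk notes. A hedged sketch of what each branch reduces to, again with `F.interpolate` standing in for `resize`:

```python
import torch
import torch.nn.functional as F

# Illustration only; F.interpolate stands in for mmseg's `resize` wrapper.
coarse = torch.randn(1, 64, 7, 7)   # laterals[i]     (coarser level)
fine = torch.randn(1, 64, 14, 14)   # laterals[i - 1] (finer level)

# scale_factor branch, e.g. upsample_cfg=dict(mode='nearest', scale_factor=2.0)
merged_a = fine + F.interpolate(coarse, scale_factor=2.0, mode='nearest')

# size branch (default): match the exact spatial shape of the finer lateral
merged_b = fine + F.interpolate(coarse, size=fine.shape[2:], mode='nearest')
```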

Diff for: tests/test_models/test_necks/test_fpn.py

+11 lines

```diff
@@ -17,3 +17,14 @@ def test_fpn():
     assert outputs[1].shape == torch.Size([1, 64, 28, 28])
     assert outputs[2].shape == torch.Size([1, 64, 14, 14])
     assert outputs[3].shape == torch.Size([1, 64, 7, 7])
+
+    fpn = FPN(
+        in_channels,
+        64,
+        len(in_channels),
+        upsample_cfg=dict(mode='nearest', scale_factor=2.0))
+    outputs = fpn(inputs)
+    assert outputs[0].shape == torch.Size([1, 64, 56, 56])
+    assert outputs[1].shape == torch.Size([1, 64, 28, 28])
+    assert outputs[2].shape == torch.Size([1, 64, 14, 14])
+    assert outputs[3].shape == torch.Size([1, 64, 7, 7])
```
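The added case reuses `in_channels`, `inputs`, and the `FPN` import defined earlier in the test file, which this hunk does not show. A self-contained way to exercise the same configuration, with input shapes assumed purely from the asserted output shapes:

```python
import torch
from mmseg.models.necks.fpn import FPN

# Assumed inputs: four feature levels whose spatial sizes match the shapes
# asserted above; the real test file defines its own `in_channels` and
# `inputs` earlier in test_fpn().
in_channels = [64, 128, 256, 512]
inputs = [
    torch.randn(1, c, 56 // 2**i, 56 // 2**i)
    for i, c in enumerate(in_channels)
]

fpn = FPN(
    in_channels,
    64,
    len(in_channels),
    upsample_cfg=dict(mode='nearest', scale_factor=2.0))
outputs = fpn(inputs)
assert [tuple(o.shape) for o in outputs] == [
    (1, 64, 56, 56), (1, 64, 28, 28), (1, 64, 14, 14), (1, 64, 7, 7)]
```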
