def test_paconv_cuda_ssg():
    """Smoke-test the CUDA PAConv SSG segmentor end to end.

    Covers forward_train, the forward() training/inference dispatch,
    ignore-index loss behaviour, simple_test and aug_test. Skipped when
    no GPU is present, since the model needs CUDA ops.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')

    set_random_seed(0, True)
    cfg = _get_segmentor_cfg(
        'paconv/paconv_cuda_ssg_8x8_cosine_200e_s3dis_seg-3d-13class.py')
    # shrink the backbone / test sampling so the test fits in GPU memory
    cfg.backbone.num_points = (256, 64, 16, 4)
    cfg.test_cfg.num_points = 32
    model = build_segmentor(cfg).cuda()
    points = [torch.rand(1024, 9).float().cuda() for _ in range(2)]
    img_metas = [dict(), dict()]
    sem_masks = [
        torch.randint(0, 13, (1024, )).long().cuda() for _ in range(2)
    ]

    # training forward yields non-negative decode / regularization losses
    losses = model.forward_train(points, img_metas, sem_masks)
    assert losses['decode.loss_sem_seg'].item() >= 0
    assert losses['regularize.loss_regularize'].item() >= 0

    # forward(return_loss=True) must reproduce forward_train under the seed
    set_random_seed(0, True)
    batch = dict(
        points=points, img_metas=img_metas, pts_semantic_mask=sem_masks)
    fwd_losses = model.forward(return_loss=True, **batch)
    assert np.allclose(losses['decode.loss_sem_seg'].item(),
                       fwd_losses['decode.loss_sem_seg'].item())
    assert np.allclose(losses['regularize.loss_regularize'].item(),
                       fwd_losses['regularize.loss_regularize'].item())

    # labels equal to the ignore index (13) contribute zero seg loss
    all_ignore = [torch.full_like(sem_masks[0], 13) for _ in range(2)]
    losses = model.forward_train(points, img_metas, all_ignore)
    assert losses['decode.loss_sem_seg'].item() == 0

    # inference: simple_test on two scenes of different sizes
    model.eval()
    with torch.no_grad():
        scene_points = [
            torch.randn(200, 6).float().cuda() * 3.0,
            torch.randn(100, 6).float().cuda() * 2.5
        ]
        results = model.simple_test(scene_points, img_metas)
        for res, num in zip(results, (200, 100)):
            assert res['semantic_mask'].shape == torch.Size([num])

    # forward(return_loss=False) with singly-nested inputs -> simple_test
    with torch.no_grad():
        batch = dict(points=[scene_points], img_metas=[img_metas])
        results = model.forward(return_loss=False, **batch)
        for res, num in zip(results, (200, 100)):
            assert res['semantic_mask'].shape == torch.Size([num])

    # aug_test: two augmentations per scene
    with torch.no_grad():
        scene_points = [
            torch.randn(2, 200, 6).float().cuda() * 3.0,
            torch.randn(2, 100, 6).float().cuda() * 2.5
        ]
        img_metas = [[dict(), dict()], [dict(), dict()]]
        results = model.aug_test(scene_points, img_metas)
        for res, num in zip(results, (200, 100)):
            assert res['semantic_mask'].shape == torch.Size([num])

    # forward(return_loss=False) with doubly-nested metas -> aug_test
    with torch.no_grad():
        batch = dict(points=scene_points, img_metas=img_metas)
        results = model.forward(return_loss=False, **batch)
        for res, num in zip(results, (200, 100)):
            assert res['semantic_mask'].shape == torch.Size([num])