[Fix] Fix the deprecation of np.float and fix CI configuration #2451

Merged · 14 commits · Jan 1, 2023
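
NumPy deprecated the `np.float` alias in release 1.20 and removed it in 1.24, so `dtype=np.float` now fails outright on current NumPy instead of only warning. The test helpers in this PR switch to the explicit `np.float32` dtype, and the CI matrix conditions are rewritten so the affected jobs run the right test set. A minimal sketch of the dtype change (the shape below is illustrative, not taken from the diff):

```python
import numpy as np

num_classes = 4

# On NumPy >= 1.24 the deprecated alias is gone and this line raises
# AttributeError: module 'numpy' has no attribute 'float'.
# total_mat = np.zeros((num_classes, num_classes), dtype=np.float)

# The replacement used throughout tests/test_metrics.py: name the precision explicitly.
total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
print(total_mat.dtype)  # float32
```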
36 changes: 16 additions & 20 deletions .github/workflows/build.yml
@@ -71,13 +71,13 @@ jobs:
coverage xml
coverage report -m
# timm from v0.6.11 requires torch>=1.7
if: ${{matrix.torch >= '1.7.0'}}
if: ${{matrix.torch != '1.5.1' && matrix.torch != '1.6.0'}}
- name: Skip timm unittests and generate coverage report
run: |
coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
coverage xml
coverage report -m
if: ${{matrix.torch < '1.7.0'}}
if: ${{matrix.torch == '1.5.1' || matrix.torch == '1.6.0'}}

build_cuda101:
runs-on: ubuntu-18.04
@@ -123,15 +123,13 @@ jobs:
apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install Pillow
run: python -m pip install Pillow==6.2.2
if: ${{matrix.torchvision < 0.5}}
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmseg dependencies
run: |
python -V
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.torch_version}}/index.html
python -m pip install -U openmim
mim install mmcv-full
python -m pip install -r requirements.txt
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
@@ -146,13 +144,13 @@ jobs:
coverage xml
coverage report -m
# timm from v0.6.11 requires torch>=1.7
if: ${{matrix.torch >= '1.7.0'}}
if: ${{matrix.torch != '1.5.1+cu101' && matrix.torch != '1.6.0+cu101'}}
- name: Skip timm unittests and generate coverage report
run: |
coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
coverage xml
coverage report -m
if: ${{matrix.torch < '1.7.0'}}
if: ${{matrix.torch == '1.5.1+cu101' || matrix.torch == '1.6.0+cu101'}}
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1.0.10
with:
@@ -163,10 +161,12 @@ jobs:
fail_ci_if_error: false

build_cuda102:
env:
LC_ALL: C.UTF-8
LANG: C.UTF-8
runs-on: ubuntu-18.04
container:
image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel

strategy:
matrix:
python-version: [3.6, 3.7, 3.8, 3.9]
@@ -175,7 +175,6 @@ jobs:
- torch: 1.9.0+cu102
torch_version: torch1.9
torchvision: 0.10.0+cu102

steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
@@ -191,15 +190,13 @@ jobs:
apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
apt-get clean
rm -rf /var/lib/apt/lists/*
- name: Install Pillow
run: python -m pip install Pillow==6.2.2
if: ${{matrix.torchvision < 0.5}}
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmseg dependencies
run: |
python -V
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/${{matrix.torch_version}}/index.html
python -m pip install openmim
mim install mmcv-full
python -m pip install -r requirements.txt
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
@@ -256,15 +253,13 @@ jobs:
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install system dependencies
run: apt-get update && apt-get install -y ffmpeg libturbojpeg ninja-build
- name: Install Pillow
run: python -m pip install Pillow==6.2.2
if: ${{matrix.torchvision < 0.5}}
- name: Install PyTorch
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
- name: Install mmseg dependencies
run: |
python -V
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu116/${{matrix.torch_version}}/index.html
python -m pip install openmim
mim install mmcv-full
python -m pip install -r requirements.txt
python -c 'import mmcv; print(mmcv.__version__)'
- name: Build and install
@@ -301,14 +296,15 @@ jobs:
run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
- name: Install MMCV
run: |
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full
pip install -U openmim
mim install mmcv-full
- name: Install unittest dependencies
run: pip install -r requirements/tests.txt -r requirements/optional.txt
- name: Build and install
run: pip install -e .
- name: Run unittests
run: |
python -m pip install 'timm<0.6.11'
python -m pip install timm
coverage run --branch --source mmseg -m pytest tests/
- name: Generate coverage report
run: |
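
The workflow edits above drop range checks like `if: ${{matrix.torch >= '1.7.0'}}` in favour of explicit equality tests against the matrix entries (`matrix.torch != '1.5.1' && matrix.torch != '1.6.0'`, plus the `+cu101` variants). A plausible reason, which the diff itself does not state, is that version numbers written as strings do not order numerically, so range comparisons on them are fragile. A small Python sketch of the pitfall and of the enumerate-the-old-versions pattern the workflow adopts instead (the `packaging` import is a third-party dependency used here only for contrast):

```python
# Sketch only: why comparing version numbers as strings is unsafe.
from packaging.version import Version  # requires the 'packaging' package

lexicographic = '1.10.0' >= '1.7.0'               # False: '1' sorts before '7'
numeric = Version('1.10.0') >= Version('1.7.0')   # True: real version ordering

# The pattern the workflow uses instead: list the torch builds that must skip
# the timm tests, since timm >= 0.6.11 requires torch >= 1.7.
torch = '1.6.0'
skip_timm_tests = torch in ('1.5.1', '1.6.0')

print(lexicographic, numeric, skip_timm_tests)  # False True True
```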
24 changes: 12 additions & 12 deletions tests/test_metrics.py
@@ -31,7 +31,7 @@ def get_confusion_matrix(pred_label, label, num_classes, ignore_index):
def legacy_mean_iou(results, gt_seg_maps, num_classes, ignore_index):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
total_mat = np.zeros((num_classes, num_classes), dtype=np.float)
total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
@@ -48,7 +48,7 @@ def legacy_mean_iou(results, gt_seg_maps, num_classes, ignore_index):
def legacy_mean_dice(results, gt_seg_maps, num_classes, ignore_index):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
total_mat = np.zeros((num_classes, num_classes), dtype=np.float)
total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
@@ -69,7 +69,7 @@ def legacy_mean_fscore(results,
beta=1):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
total_mat = np.zeros((num_classes, num_classes), dtype=np.float)
total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
@@ -100,7 +100,7 @@ def test_metrics():
'IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
# Test the correctness of the implementation of mDice calculation.
@@ -110,7 +110,7 @@ def test_metrics():
'Dice']
all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(dice, dice_l)
# Test the correctness of the implementation of mDice calculation.
@@ -120,7 +120,7 @@ def test_metrics():
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
@@ -135,7 +135,7 @@ def test_metrics():
'aAcc'], ret_metrics['Acc'], ret_metrics['IoU'], ret_metrics[
'Dice'], ret_metrics['Precision'], ret_metrics[
'Recall'], ret_metrics['Fscore']
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
assert np.allclose(dice, dice_l)
@@ -228,7 +228,7 @@ def test_mean_iou():
'IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)

@@ -254,7 +254,7 @@ def test_mean_dice():
'Dice']
all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, dice_l)

@@ -280,7 +280,7 @@ def test_mean_fscore():
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
@@ -291,7 +291,7 @@ def test_mean_fscore():
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index, beta=2)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
@@ -346,6 +346,6 @@ def save_arr(input_arrays: list, title: str, is_image: bool, dir: str):
'Acc'], ret_metrics['IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, labels, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(all_acc, all_acc_l)
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
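
The same diff also replaces exact comparisons such as `assert all_acc == all_acc_l` with `np.allclose`. With the legacy helpers now accumulating into a `float32` matrix, the reference computation and the library implementation can round differently in the last bits, so bit-exact equality is too strict a check. A small illustrative sketch (the values are made up, not taken from the tests):

```python
import numpy as np

# Summing the same float32 values in a different order can change the result
# in the last bit, which is enough to break an exact == comparison.
vals = np.array([0.1, 0.2, 0.3], dtype=np.float32)
forward = np.float32(0.1) + np.float32(0.2) + np.float32(0.3)
reversed_sum = vals[::-1].sum(dtype=np.float32)

print(forward == reversed_sum)             # may be False purely due to rounding
print(np.allclose(forward, reversed_sum))  # True within the default rtol=1e-5, atol=1e-8
```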