From 522538e02c72e866b6347f80b984b849aa9c5fad Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 13 Jun 2023 11:01:04 +0800
Subject: [PATCH 1/5] fix

---
 .../codebase/mmseg/models/segmentors/base.py | 22 +++++++++++++++++--
 .../segmentors/cascade_encoder_decoder.py | 5 ++---
 .../models/segmentors/encoder_decoder.py | 20 +++--------------
 .../test_mmseg/test_mmseg_models.py | 21 +++++++++++++++++---
 tests/test_codebase/test_mmseg/utils.py | 3 ++-
 5 files changed, 45 insertions(+), 26 deletions(-)

diff --git a/mmdeploy/codebase/mmseg/models/segmentors/base.py b/mmdeploy/codebase/mmseg/models/segmentors/base.py
index 6462d5b2e9..c754e3e161 100644
--- a/mmdeploy/codebase/mmseg/models/segmentors/base.py
+++ b/mmdeploy/codebase/mmseg/models/segmentors/base.py
@@ -2,7 +2,7 @@
 from mmseg.structures import SegDataSample
 
 from mmdeploy.core import FUNCTION_REWRITER, mark
-from mmdeploy.utils import is_dynamic_shape
+from mmdeploy.utils import get_codebase_config, is_dynamic_shape
 
 
 @FUNCTION_REWRITER.register_rewriter(
@@ -47,4 +47,22 @@ def __mark_input(inputs):
     for data_sample in data_samples:
         data_sample.set_field(
             name='img_shape', value=img_shape, field_type='metainfo')
-    return self.predict(inputs, data_samples)
+    seg_logit = self.predict(inputs, data_samples)
+
+    # mark seg_head
+    @mark('decode_head', outputs=['output'])
+    def __mark_seg_logit(seg_logit):
+        return seg_logit
+
+    ctx = FUNCTION_REWRITER.get_context()
+    with_argmax = get_codebase_config(ctx.cfg).get('with_argmax', True)
+    # deal with out_channels=1 with two classes
+    if seg_logit.shape[1] == 1:
+        seg_logit = seg_logit.sigmoid()
+        seg_pred = seg_logit > self.decode_head.threshold
+        seg_pred = seg_pred.to(seg_logit.device)
+    else:
+        seg_pred = __mark_seg_logit(seg_logit)
+    if with_argmax:
+        seg_pred = seg_pred.argmax(dim=1, keepdim=True)
+    return seg_pred
diff --git a/mmdeploy/codebase/mmseg/models/segmentors/cascade_encoder_decoder.py b/mmdeploy/codebase/mmseg/models/segmentors/cascade_encoder_decoder.py
index ad8d35b81f..05b3bf9320 100644
--- a/mmdeploy/codebase/mmseg/models/segmentors/cascade_encoder_decoder.py
+++ b/mmdeploy/codebase/mmseg/models/segmentors/cascade_encoder_decoder.py
@@ -17,7 +17,7 @@ def cascade_encoder_decoder__predict(self, inputs, data_samples, **kwargs):
         data_samples (SampleList): The seg data samples.
 
     Returns:
-        torch.Tensor: Output segmentation map pf shape [N, 1, H, W].
+        torch.Tensor: Output segmentation logits of shape [N, C, H, W].
     """
     batch_img_metas = []
     for data_sample in data_samples:
@@ -28,5 +28,4 @@
         out = self.decode_head[i].forward(x, out)
     seg_logit = self.decode_head[-1].predict(x, out, batch_img_metas,
                                              self.test_cfg)
-    seg_pred = seg_logit.argmax(dim=1, keepdim=True)
-    return seg_pred
+    return seg_logit
diff --git a/mmdeploy/codebase/mmseg/models/segmentors/encoder_decoder.py b/mmdeploy/codebase/mmseg/models/segmentors/encoder_decoder.py
index 312c0fbfeb..e7a8d1f592 100644
--- a/mmdeploy/codebase/mmseg/models/segmentors/encoder_decoder.py
+++ b/mmdeploy/codebase/mmseg/models/segmentors/encoder_decoder.py
@@ -1,6 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from mmdeploy.core import FUNCTION_REWRITER, mark
-from mmdeploy.utils import get_codebase_config
+from mmdeploy.core import FUNCTION_REWRITER
 
 
 @FUNCTION_REWRITER.register_rewriter(
@@ -18,24 +17,11 @@ def encoder_decoder__predict(self, inputs, data_samples, **kwargs):
         data_samples (SampleList): The seg data samples.
 
     Returns:
-        torch.Tensor: Output segmentation map pf shape [N, 1, H, W].
+        torch.Tensor: Output segmentation logits of shape [N, C, H, W].
     """
     batch_img_metas = []
     for data_sample in data_samples:
         batch_img_metas.append(data_sample.metainfo)
     x = self.extract_feat(inputs)
     seg_logit = self.decode_head.predict(x, batch_img_metas, self.test_cfg)
-
-    ctx = FUNCTION_REWRITER.get_context()
-    if get_codebase_config(ctx.cfg).get('with_argmax', True) is False:
-        return seg_logit
-
-    # mark seg_head
-    @mark('decode_head', outputs=['output'])
-    def __mark_seg_logit(seg_logit):
-        return seg_logit
-
-    seg_logit = __mark_seg_logit(seg_logit)
-
-    seg_pred = seg_logit.argmax(dim=1, keepdim=True)
-    return seg_pred
+    return seg_logit
diff --git a/tests/test_codebase/test_mmseg/test_mmseg_models.py b/tests/test_codebase/test_mmseg/test_mmseg_models.py
index b5704b8221..cf920bd794 100644
--- a/tests/test_codebase/test_mmseg/test_mmseg_models.py
+++ b/tests/test_codebase/test_mmseg/test_mmseg_models.py
@@ -40,10 +40,21 @@ def test_encoderdecoder_predict(backend):
 
 
 @pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
-def test_basesegmentor_forward(backend):
+@pytest.mark.parametrize('with_argmax,use_sigmoid', [(True, False),
+                                                     (False, True)])
+def test_basesegmentor_forward(backend: Backend, with_argmax: bool,
+                               use_sigmoid: bool):
     check_backend(backend)
+    config_path = 'tests/test_codebase/test_mmseg/data/model.py'
+    model_cfg = mmengine.Config.fromfile(config_path)
+    if use_sigmoid:
+        model_cfg.model.decode_head.num_classes = 2
+        model_cfg.model.decode_head.out_channels = 1
+        model_cfg.model.decode_head.threshold = 0.3
     deploy_cfg = generate_mmseg_deploy_config(backend.value)
-    task_processor = generate_mmseg_task_processor(deploy_cfg=deploy_cfg)
+    deploy_cfg.codebase_config.with_argmax = with_argmax
+    task_processor = generate_mmseg_task_processor(
+        deploy_cfg=deploy_cfg, model_cfg=model_cfg)
     segmentor = task_processor.build_pytorch_model()
     size = 256
     inputs = torch.randn(1, 3, size, size)
@@ -58,7 +69,11 @@ def test_basesegmentor_forward(backend):
         wrapped_model=wrapped_model,
         model_inputs=rewrite_inputs,
         deploy_cfg=deploy_cfg)
-    assert torch.allclose(model_outputs, rewrite_outputs[0].squeeze(0))
+    rewrite_outputs = rewrite_outputs[0]
+    if not (with_argmax or use_sigmoid):
+        rewrite_outputs = rewrite_outputs.argmax(dim=1, keepdim=True)
+    rewrite_outputs = rewrite_outputs.squeeze(0).to(model_outputs)
+    assert torch.allclose(model_outputs, rewrite_outputs)
 
 
 @pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
diff --git a/tests/test_codebase/test_mmseg/utils.py b/tests/test_codebase/test_mmseg/utils.py
index 7da63d7a4e..095dda56bd 100644
--- a/tests/test_codebase/test_mmseg/utils.py
+++ b/tests/test_codebase/test_mmseg/utils.py
@@ -24,7 +24,8 @@ def generate_mmseg_deploy_config(backend='onnxruntime'):
     deploy_cfg = mmengine.Config(
         dict(
             backend_config=dict(type=backend),
-            codebase_config=dict(type='mmseg', task='Segmentation'),
+            codebase_config=dict(
+                type='mmseg', task='Segmentation', with_argmax=False),
             onnx_config=dict(
                 type='onnx',
                 export_params=True,

From 17f445f9aeea9d6ed98e6d576c8c7d8368089bbc Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 13 Jun 2023 11:34:22 +0800
Subject: [PATCH 2/5] fix

---
 tests/test_codebase/test_mmseg/test_mmseg_models.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/test_codebase/test_mmseg/test_mmseg_models.py b/tests/test_codebase/test_mmseg/test_mmseg_models.py
index cf920bd794..79379fac0f 100644
--- a/tests/test_codebase/test_mmseg/test_mmseg_models.py
+++ b/tests/test_codebase/test_mmseg/test_mmseg_models.py
@@ -36,7 +36,10 @@ def test_encoderdecoder_predict(backend):
         wrapped_model=wrapped_model,
         model_inputs=rewrite_inputs,
         deploy_cfg=deploy_cfg)
-    assert torch.allclose(model_outputs, rewrite_outputs[0].squeeze(0))
+    rewrite_outputs = segmentor.postprocess_result(rewrite_outputs[0],
+                                                   data_samples)
+    rewrite_outputs = rewrite_outputs[0].pred_sem_seg.data
+    assert torch.allclose(model_outputs, rewrite_outputs)
 
 
 @pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])

From c73bb49f27a4777fd5c31cc5c505a522d64aee76 Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Wed, 14 Jun 2023 10:39:42 +0800
Subject: [PATCH 3/5] fix

---
 mmdeploy/codebase/mmseg/models/segmentors/base.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mmdeploy/codebase/mmseg/models/segmentors/base.py b/mmdeploy/codebase/mmseg/models/segmentors/base.py
index c754e3e161..d2b7624396 100644
--- a/mmdeploy/codebase/mmseg/models/segmentors/base.py
+++ b/mmdeploy/codebase/mmseg/models/segmentors/base.py
@@ -1,4 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+import torch
 from mmseg.structures import SegDataSample
 
 from mmdeploy.core import FUNCTION_REWRITER, mark
@@ -58,9 +59,8 @@ def __mark_seg_logit(seg_logit):
     with_argmax = get_codebase_config(ctx.cfg).get('with_argmax', True)
     # deal with out_channels=1 with two classes
     if seg_logit.shape[1] == 1:
-        seg_logit = seg_logit.sigmoid()
         seg_pred = seg_logit > self.decode_head.threshold
-        seg_pred = seg_pred.to(seg_logit.device)
+        seg_pred = seg_pred.to(torch.int64)
     else:
         seg_pred = __mark_seg_logit(seg_logit)
     if with_argmax:

From aeccb6fbcf2b4106d2352b8a78588eefdd261e2f Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Thu, 15 Jun 2023 15:32:47 +0800
Subject: [PATCH 4/5] fix

---
 mmdeploy/codebase/mmseg/models/segmentors/base.py | 1 +
 tests/test_codebase/test_mmseg/test_mmseg_models.py | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/mmdeploy/codebase/mmseg/models/segmentors/base.py b/mmdeploy/codebase/mmseg/models/segmentors/base.py
index d2b7624396..960d340628 100644
--- a/mmdeploy/codebase/mmseg/models/segmentors/base.py
+++ b/mmdeploy/codebase/mmseg/models/segmentors/base.py
@@ -59,6 +59,7 @@ def __mark_seg_logit(seg_logit):
     with_argmax = get_codebase_config(ctx.cfg).get('with_argmax', True)
     # deal with out_channels=1 with two classes
     if seg_logit.shape[1] == 1:
+        seg_logit = seg_logit.sigmoid()
         seg_pred = seg_logit > self.decode_head.threshold
         seg_pred = seg_pred.to(torch.int64)
     else:
diff --git a/tests/test_codebase/test_mmseg/test_mmseg_models.py b/tests/test_codebase/test_mmseg/test_mmseg_models.py
index 79379fac0f..1c03139b45 100644
--- a/tests/test_codebase/test_mmseg/test_mmseg_models.py
+++ b/tests/test_codebase/test_mmseg/test_mmseg_models.py
@@ -2,6 +2,7 @@
 import mmengine
 import pytest
 import torch
+from packaging import version
 
 from mmdeploy.codebase import import_codebase
 from mmdeploy.utils import Backend, Codebase, Task
@@ -51,6 +52,9 @@ def test_basesegmentor_forward(backend: Backend, with_argmax: bool,
     config_path = 'tests/test_codebase/test_mmseg/data/model.py'
     model_cfg = mmengine.Config.fromfile(config_path)
     if use_sigmoid:
+        import mmseg
+        if version.parse(mmseg.__version__) <= version.parse('1.0.0'):
+            pytest.skip('ignore mmseg<=1.0.0')
         model_cfg.model.decode_head.num_classes = 2
         model_cfg.model.decode_head.out_channels = 1
         model_cfg.model.decode_head.threshold = 0.3
@@ -73,7 +77,7 @@ def test_basesegmentor_forward(backend: Backend, with_argmax: bool,
         model_inputs=rewrite_inputs,
         deploy_cfg=deploy_cfg)
     rewrite_outputs = rewrite_outputs[0]
-    if not (with_argmax or use_sigmoid):
+    if rewrite_outputs.shape[1] != 1:
         rewrite_outputs = rewrite_outputs.argmax(dim=1, keepdim=True)
     rewrite_outputs = rewrite_outputs.squeeze(0).to(model_outputs)
     assert torch.allclose(model_outputs, rewrite_outputs)

From d4cc6f4fdfdc20ac7eda48f2efbd72bbe2017ec0 Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Tue, 20 Jun 2023 17:21:07 +0800
Subject: [PATCH 5/5] fix

---
 .../text-detection}/text-detection_ncnn-int8_static.py | 0
 docs/en/05-supported-backends/onnxruntime.md | 2 +-
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename configs/{ => mmocr/text-detection}/text-detection_ncnn-int8_static.py (100%)

diff --git a/configs/text-detection_ncnn-int8_static.py b/configs/mmocr/text-detection/text-detection_ncnn-int8_static.py
similarity index 100%
rename from configs/text-detection_ncnn-int8_static.py
rename to configs/mmocr/text-detection/text-detection_ncnn-int8_static.py
diff --git a/docs/en/05-supported-backends/onnxruntime.md b/docs/en/05-supported-backends/onnxruntime.md
index 2d65322036..1036e40a22 100644
--- a/docs/en/05-supported-backends/onnxruntime.md
+++ b/docs/en/05-supported-backends/onnxruntime.md
@@ -1,4 +1,4 @@
-# onnxruntime 支持情况
+# onnxruntime Support
 
 ## Introduction of ONNX Runtime