This repository has been archived by the owner on Jul 2, 2021. It is now read-only.

Commit

merge eval_camvid and eval_cityscapes
yuyu2172 committed Dec 5, 2018
1 parent 8ffb55a commit ea46f5a
Showing 11 changed files with 89 additions and 104 deletions.
2 changes: 1 addition & 1 deletion chainercv/links/model/segnet/segnet_basic.py
@@ -52,7 +52,7 @@ class SegNetBasic(chainer.Chain):
        'camvid': {
            'param': {'n_class': 11},
            'url': 'https://chainercv-models.preferred.jp/'
-            'segnet_camvid_trained_2017_05_28.npz'
+            'segnet_camvid_trained_2018_12_05.npz'
        }
    }
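
The `'camvid'` key in this `_models` table is what `pretrained_model='camvid'` resolves to, so the only user-visible effect of this change is which weight file gets downloaded. A minimal sketch of picking up the new weights (download and caching are handled by ChainerCV):

```
from chainercv.links import SegNetBasic

# 'camvid' resolves to the entry above: n_class=11 and the
# segnet_camvid_trained_2018_12_05.npz weights, fetched on first use.
model = SegNetBasic(pretrained_model='camvid')
# model.predict takes a list of CHW images and returns label maps.
```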

2 changes: 1 addition & 1 deletion examples/segnet/README.md
@@ -53,7 +53,7 @@ $ python demo.py [--gpu <gpu>] [--pretrained-model <model_path>] 0001TP_008550.p
| ChainerCV | 82.7 % | **67.1 %** | **49.4 %** |
| Official | **82.8 %** | 62.3% | 46.3 % |

-The evaluation can be conducted using [`chainercv/examples/semantic_segmentation/eval_cityscapes.py`](https://github.com/chainer/chainercv/blob/master/examples/semantic_segmentation).
+The evaluation can be conducted using [`chainercv/examples/semantic_segmentation/eval_semantic_segmentation.py`](https://github.com/chainer/chainercv/blob/master/examples/semantic_segmentation).


# Reference
12 changes: 4 additions & 8 deletions examples/semantic_segmentation/README.md
@@ -18,15 +18,11 @@ The scores are mIoU.
| PSPNet with ResNet101 (single scale) | 79.70 % [1] | 79.03 % |

```
-$ python eval_cityscapes.py [--model pspnet_resnet101] [--gpu <gpu>] [--pretrained-model <model_path>]
+$ python eval_semantic_segmentation.py --gpu <GPU> --dataset cityscapes --model pspnet_resnet101
+# with multiple GPUs
+$ mpiexec -n <#gpu> python eval_semantic_segmentation_multi.py --dataset cityscapes --model pspnet_resnet101
```

-You can conduct evaluation with multiple GPUs by `eval_cityscapes_multi.py`.
-Note that this script requires ChainerMN.
-
-```
-$ mpiexec -n <#gpu> python eval_cityscapes_multi.py [--model pspnet_resnet101] [--pretrained-model <model_path>]
-```
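
For reference, the model construction behind these commands looks roughly like the sketch below; the names are taken from the merged script later in this commit, and `'cityscapes'` is the pretrained-weight key chosen when `--pretrained-model` is omitted:

```
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.experimental.links import PSPNetResNet101

# Roughly what eval_semantic_segmentation.py does for --dataset cityscapes
# without --pretrained-model: the dataset name doubles as the weight key.
model = PSPNetResNet101(
    n_class=len(cityscapes_semantic_segmentation_label_names),
    pretrained_model='cityscapes', input_size=(713, 713))
```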

### CamVid

@@ -35,7 +31,7 @@ $ mpiexec -n <#gpu> python eval_cityscapes_multi.py [--model pspnet_resnet101] [
| SegNet | 46.3 % [2] | 49.4 % |

```
-$ python eval_camvid.py [--gpu <gpu>] [--pretrained-model <model_path>] [--batchsize <batchsize>]
+$ python eval_semantic_segmentation.py --gpu <GPU> --dataset camvid --model segnet
```
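
The CamVid score can also be reproduced without the CLI wrapper; a minimal sketch of the evaluation flow the merged script wraps (same API calls as in eval_semantic_segmentation.py further down, CamVid/SegNet defaults assumed, CPU unless the model is moved to a GPU):

```
from chainer import iterators

from chainercv.datasets import CamVidDataset
from chainercv.evaluations import eval_semantic_segmentation
from chainercv.links import SegNetBasic
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook

model = SegNetBasic(pretrained_model='camvid')
dataset = CamVidDataset(split='test')
it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

# apply_to_iterator runs model.predict over the iterator and returns
# (inputs, predictions, remaining dataset values) as tuples of iterators.
imgs, pred_values, gt_values = apply_to_iterator(
    model.predict, it, hook=ProgressHook(len(dataset)))
del imgs  # the input images are not needed for scoring
pred_labels, = pred_values
gt_labels, = gt_values

result = eval_semantic_segmentation(pred_labels, gt_labels)
print('mIoU: {:.4f}'.format(result['miou']))
```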


53 changes: 0 additions & 53 deletions examples/semantic_segmentation/eval_camvid.py

This file was deleted.

examples/semantic_segmentation/eval_semantic_segmentation.py (renamed from eval_cityscapes.py)
@@ -3,38 +3,59 @@
import chainer
from chainer import iterators

+from chainercv.datasets import ade20k_semantic_segmentation_label_names
+from chainercv.datasets import ADE20KSemanticSegmentationDataset
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.datasets import CityscapesSemanticSegmentationDataset
+from chainercv.datasets import camvid_label_names
+from chainercv.datasets import CamVidDataset

from chainercv.evaluations import eval_semantic_segmentation
from chainercv.experimental.links import PSPNetResNet101
+from chainercv.links import SegNetBasic
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
-        '--model', choices=('pspnet_resnet101',),
-        default='pspnet_resnet101')
+        '--dataset', choices=('cityscapes', 'ade20k', 'camvid'))
+    parser.add_argument(
+        '--model', choices=(
+            'pspnet_resnet101', 'segnet'))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

+    if args.dataset == 'cityscapes':
+        dataset = CityscapesSemanticSegmentationDataset(
+            split='val', label_resolution='fine')
+        label_names = cityscapes_semantic_segmentation_label_names
+    elif args.dataset == 'ade20k':
+        dataset = ADE20KSemanticSegmentationDataset(split='val')
+        label_names = ade20k_semantic_segmentation_label_names
+    elif args.dataset == 'camvid':
+        dataset = CamVidDataset(split='test')
+        label_names = camvid_label_names
+
+    if args.pretrained_model:
+        pretrained_model = args.pretrained_model
+    else:
+        pretrained_model = args.dataset
    if args.model == 'pspnet_resnet101':
-        if args.pretrained_model:
-            model = PSPNetResNet101(
-                n_class=len(cityscapes_semantic_segmentation_label_names),
-                pretrained_model=args.pretrained_model, input_size=(713, 713)
-            )
-        else:
-            model = PSPNetResNet101(pretrained_model='cityscapes')
+        model = PSPNetResNet101(
+            n_class=len(label_names),
+            pretrained_model=pretrained_model, input_size=(713, 713)
+        )
+    elif args.model == 'segnet':
+        model = SegNetBasic(
+            n_class=len(label_names), pretrained_model=pretrained_model)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

-    dataset = CityscapesSemanticSegmentationDataset(
-        split='val', label_resolution='fine')
    it = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

@@ -47,8 +68,7 @@ def main():

    result = eval_semantic_segmentation(pred_labels, gt_labels)

-    for iu, label_name in zip(
-            result['iou'], cityscapes_semantic_segmentation_label_names):
+    for iu, label_name in zip(result['iou'], label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
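
As a usage note, `eval_semantic_segmentation` returns a plain dict, which is what the printing above iterates over; a self-contained example with toy labels showing the available keys:

```
import numpy as np

from chainercv.evaluations import eval_semantic_segmentation

# Two toy 2x3 label maps with three classes.
pred_labels = [np.array([[0, 1, 1], [2, 2, 0]])]
gt_labels = [np.array([[0, 1, 2], [2, 2, 0]])]

result = eval_semantic_segmentation(pred_labels, gt_labels)
# Keys: 'iou' and 'class_accuracy' are per-class arrays; 'miou',
# 'pixel_accuracy' and 'mean_class_accuracy' are scalars.
print(sorted(result.keys()))
print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
```
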
examples/semantic_segmentation/eval_semantic_segmentation_multi.py (renamed from eval_cityscapes_multi.py)
@@ -8,41 +8,61 @@

import chainermn

+from chainercv.datasets import ade20k_semantic_segmentation_label_names
+from chainercv.datasets import ADE20KSemanticSegmentationDataset
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.datasets import CityscapesSemanticSegmentationDataset
+from chainercv.datasets import camvid_label_names
+from chainercv.datasets import CamVidDataset

from chainercv.evaluations import calc_semantic_segmentation_confusion
from chainercv.evaluations import calc_semantic_segmentation_iou
from chainercv.experimental.links import PSPNetResNet101
+from chainercv.links import SegNetBasic
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
-        '--model', choices=('pspnet_resnet101',),
-        default='pspnet_resnet101')
+        '--dataset', choices=('cityscapes', 'ade20k', 'camvid'))
+    parser.add_argument(
+        '--model', choices=(
+            'pspnet_resnet101', 'segnet'))
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    device = comm.intra_rank

+    if args.dataset == 'cityscapes':
+        dataset = CityscapesSemanticSegmentationDataset(
+            split='val', label_resolution='fine')
+        label_names = cityscapes_semantic_segmentation_label_names
+    elif args.dataset == 'ade20k':
+        dataset = ADE20KSemanticSegmentationDataset(split='val')
+        label_names = ade20k_semantic_segmentation_label_names
+    elif args.dataset == 'camvid':
+        dataset = CamVidDataset(split='test')
+        label_names = camvid_label_names
+
+    if args.pretrained_model:
+        pretrained_model = args.pretrained_model
+    else:
+        pretrained_model = args.dataset
    if args.model == 'pspnet_resnet101':
-        if args.pretrained_model:
-            model = PSPNetResNet101(
-                n_class=len(cityscapes_semantic_segmentation_label_names),
-                pretrained_model=args.pretrained_model, input_size=(713, 713)
-            )
-        else:
-            model = PSPNetResNet101(pretrained_model='cityscapes')
+        model = PSPNetResNet101(
+            n_class=len(label_names),
+            pretrained_model=pretrained_model, input_size=(713, 713)
+        )
+    elif args.model == 'segnet':
+        model = SegNetBasic(
+            n_class=len(label_names), pretrained_model=pretrained_model)

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

-    dataset = CityscapesSemanticSegmentationDataset(
-        split='val', label_resolution='fine')

    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:

@@ -68,8 +88,7 @@ def main():
    pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
    class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

-    for iu, label_name in zip(
-            iou, cityscapes_semantic_segmentation_label_names):
+    for iu, label_name in zip(iou, label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
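
The multi-GPU script computes its numbers from a confusion matrix (the two `calc_*` imports above) rather than calling `eval_semantic_segmentation` directly. One property that makes this convenient for evaluation split across workers is that confusion matrices of disjoint shards simply add up; a small single-process sketch:

```
import numpy as np

from chainercv.evaluations import calc_semantic_segmentation_confusion
from chainercv.evaluations import calc_semantic_segmentation_iou

# Two toy "shards" of predictions, as two workers might produce them.
# Both shards contain labels 0-2, so the two matrices have the same shape.
pred_a = [np.array([[0, 1], [1, 2]])]
gt_a = [np.array([[0, 1], [2, 2]])]
pred_b = [np.array([[2, 0], [0, 1]])]
gt_b = [np.array([[2, 0], [1, 1]])]

confusion = (calc_semantic_segmentation_confusion(pred_a, gt_a)
             + calc_semantic_segmentation_confusion(pred_b, gt_b))
iou = calc_semantic_segmentation_iou(confusion)
# Same formulas as in the script above.
pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

print('mean IoU: {:.4f}'.format(np.nanmean(iou)))
print('pixel accuracy: {:.4f}'.format(pixel_accuracy))
```
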
New file (test script for eval_semantic_segmentation.py):
@@ -0,0 +1,9 @@
cd examples/semantic_segmentation

sed -e 's/CamVidDataset(split='\''test'\'')/CamVidDataset(split='\''test'\'').slice[:20]/' -i eval_camvid.py
$PYTHON eval_semantic_segmentation.py --gpu 0 --dataset camvid --model segnet

sed -e 's/label_resolution='\''fine'\'')/label_resolution='\''fine'\'').slice[:20]/' \
-i eval_cityscapes.py
$PYTHON eval_semantic_segmentation.py --gpu 0 --dataset cityscapes --model pspnet_resnet101
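
The `sed` lines above shrink the dataset used by the evaluation script to its first 20 samples so the test run stays short; `.slice` is ChainerCV's way of taking a cheap view of a sliceable dataset. A small sketch of the CamVid case:

```
from chainercv.datasets import CamVidDataset

dataset = CamVidDataset(split='test')
small = dataset.slice[:20]  # a view over the first 20 examples, nothing is copied

print(len(small))  # 20
```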

New file (test script for eval_semantic_segmentation_multi.py):
@@ -0,0 +1,8 @@
cd examples/semantic_segmentation

sed -e 's/CamVidDataset(split='\''test'\'')/CamVidDataset(split='\''test'\'').slice[:20]/' -i eval_camvid.py
$MPIEXEC $PYTHON eval_semantic_segmentation_multi.py --gpu 0 --dataset camvid --model segnet

sed -e 's/label_resolution='\''fine'\'')/label_resolution='\''fine'\'').slice[:20]/' \
-i eval_cityscapes.py
$MPIEXEC $PYTHON eval_semantic_segmentation_multi.py --gpu 0 --dataset cityscapes --model pspnet_resnet101

This file was deleted.

This file was deleted.

This file was deleted.
