Skip to content
This repository has been archived by the owner on Jul 2, 2021. It is now read-only.

Commit

Permalink
Merge pull request #749 from yuyu2172/simplify-camvid
Browse files Browse the repository at this point in the history
Unify evaluation for semantic segmentation
  • Loading branch information
Hakuyume authored Jan 17, 2019
2 parents 9ea49c7 + 2ab06c9 commit 1f3aca9
Show file tree
Hide file tree
Showing 13 changed files with 169 additions and 190 deletions.
2 changes: 1 addition & 1 deletion chainercv/links/model/segnet/segnet_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class SegNetBasic(chainer.Chain):
'camvid': {
'param': {'n_class': 11},
'url': 'https://chainercv-models.preferred.jp/'
'segnet_camvid_trained_2017_05_28.npz'
'segnet_camvid_trained_2018_12_05.npz'
}
}

Expand Down
2 changes: 1 addition & 1 deletion examples/segnet/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ $ python demo.py [--gpu <gpu>] [--pretrained-model <model_path>] 0001TP_008550.p
| ChainerCV | 82.7 % | **67.1 %** | **49.4 %** |
| Official | **82.8 %** | 62.3% | 46.3 % |

The evaluation can be conducted using [`chainercv/examples/semantic_segmentation/eval_cityscapes.py`](https://github.com/chainer/chainercv/blob/master/examples/semantic_segmentation).
The evaluation can be conducted using [`chainercv/examples/semantic_segmentation/eval_semantic_segmentation.py`](https://github.com/chainer/chainercv/blob/master/examples/semantic_segmentation).


# Reference
Expand Down
35 changes: 32 additions & 3 deletions examples/segnet/train.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
import argparse
from collections import defaultdict
import os

import chainer
import numpy as np

from chainer.dataset import concat_examples
from chainer.datasets import TransformDataset
from chainer import iterators
from chainer import optimizers
Expand All @@ -16,6 +19,31 @@
from chainercv.links import SegNetBasic


def recalculate_bn_statistics(model, batchsize):
    """Recompute BatchNormalization statistics on the CamVid train split.

    The model is run over the whole training set, and for every link whose
    name ends with ``_bn`` the ``avg_mean``/``avg_var`` observed after each
    forward pass are accumulated.  Those links are then overwritten with the
    per-batch average of the accumulated values.

    Args:
        model: A chainer model containing BatchNormalization links whose
            names end with ``_bn``.
        batchsize (int): Minibatch size used for the forward passes.

    Returns:
        The same model, with its BN statistics replaced in place.
    """
    dataset = CamVidDataset(split='train')
    iterator = chainer.iterators.SerialIterator(
        dataset, batchsize, repeat=False, shuffle=False)
    # defaultdict(np.float32) starts every accumulator at 0.0.
    mean_acc = defaultdict(np.float32)
    var_acc = defaultdict(np.float32)

    n_batch = 0
    for minibatch in iterator:
        images, _ = concat_examples(minibatch)
        model(model.xp.array(images))
        for name, link in model.namedlinks():
            if not name.endswith('_bn'):
                continue
            mean_acc[name] += link.avg_mean
            var_acc[name] += link.avg_var
        n_batch += 1

    for name, link in model.namedlinks():
        if name.endswith('_bn'):
            link.avg_mean = mean_acc[name] / n_batch
            link.avg_var = var_acc[name] / n_batch

    return model


def transform(in_data):
img, label = in_data
if np.random.rand() > 0.5:
Expand Down Expand Up @@ -81,9 +109,6 @@ def main():
['validation/main/miou'], x_key='iteration',
file_name='miou.png'))

trainer.extend(extensions.snapshot_object(
model.predictor, filename='model_iteration-{.updater.iteration}'),
trigger=end_trigger)
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'elapsed_time', 'lr',
'main/loss', 'validation/main/miou',
Expand All @@ -100,6 +125,10 @@ def main():

trainer.run()

chainer.serializers.save_npz(
os.path.join(args.out, 'snapshot_model.npz'),
recalculate_bn_statistics(model.predictor, 24))


if __name__ == '__main__':
main()
12 changes: 4 additions & 8 deletions examples/semantic_segmentation/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,11 @@ The scores are mIoU.
| PSPNet with ResNet101 (single scale) | 79.70 % [1] | 79.03 % |

```
$ python eval_cityscapes.py [--model pspnet_resnet101] [--gpu <gpu>] [--pretrained-model <model_path>]
$ python eval_semantic_segmentation.py --gpu <GPU> --dataset cityscapes --model pspnet_resnet101
# with multiple GPUs
$ mpiexec -n <#gpu> python eval_semantic_segmentation_multi.py --dataset cityscapes --model pspnet_resnet101
```

You can conduct evaluation with multiple GPUs by `eval_cityscapes_multi.py`.
Note that this script requires ChainerMN.

```
$ mpiexec -n <#gpu> python eval_cityscapes_multi.py [--model pspnet_resnet101] [--pretrained-model <model_path>]
```

### CamVid

Expand All @@ -35,7 +31,7 @@ $ mpiexec -n <#gpu> python eval_cityscapes_multi.py [--model pspnet_resnet101] [
| SegNet | 46.3 % [2] | 49.4 % |

```
$ python eval_camvid.py [--gpu <gpu>] [--pretrained-model <model_path>] [--batchsize <batchsize>]
$ python eval_semantic_segmentation.py --gpu <GPU> --dataset camvid --model segnet
```


Expand Down
83 changes: 0 additions & 83 deletions examples/semantic_segmentation/eval_camvid.py

This file was deleted.

62 changes: 0 additions & 62 deletions examples/semantic_segmentation/eval_cityscapes.py

This file was deleted.

101 changes: 101 additions & 0 deletions examples/semantic_segmentation/eval_semantic_segmentation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import argparse

import chainer
from chainer import iterators

from chainercv.datasets import ade20k_semantic_segmentation_label_names
from chainercv.datasets import ADE20KSemanticSegmentationDataset
from chainercv.datasets import camvid_label_names
from chainercv.datasets import CamVidDataset
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.datasets import CityscapesSemanticSegmentationDataset

from chainercv.evaluations import eval_semantic_segmentation
from chainercv.experimental.links import PSPNetResNet101
from chainercv.experimental.links import PSPNetResNet50
from chainercv.links import SegNetBasic
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook


def get_dataset_and_model(dataset_name, model_name, pretrained_model,
                          input_size):
    """Set up the evaluation dataset and a semantic segmentation model.

    Args:
        dataset_name (str): One of ``'cityscapes'``, ``'ade20k'`` or
            ``'camvid'``.
        model_name (str): One of ``'pspnet_resnet101'``,
            ``'pspnet_resnet50'`` or ``'segnet'``.
        pretrained_model (str): Path to a pretrained weight file.  If falsy,
            the weights pretrained on :obj:`dataset_name` are used.
        input_size (tuple): Input size forwarded to the PSPNet models.

    Returns:
        A tuple of ``(dataset, label_names, model)``.

    Raises:
        ValueError: If :obj:`dataset_name` or :obj:`model_name` is not one
            of the supported choices.
    """
    if dataset_name == 'cityscapes':
        dataset = CityscapesSemanticSegmentationDataset(
            split='val', label_resolution='fine')
        label_names = cityscapes_semantic_segmentation_label_names
    elif dataset_name == 'ade20k':
        dataset = ADE20KSemanticSegmentationDataset(split='val')
        label_names = ade20k_semantic_segmentation_label_names
    elif dataset_name == 'camvid':
        dataset = CamVidDataset(split='test')
        label_names = camvid_label_names
    else:
        # Fail fast: the original fell through and raised NameError on the
        # undefined local `label_names` below.
        raise ValueError('unknown dataset: {}'.format(dataset_name))

    n_class = len(label_names)

    # Fall back to the weights pretrained on the selected dataset.
    if not pretrained_model:
        pretrained_model = dataset_name

    if model_name == 'pspnet_resnet101':
        model = PSPNetResNet101(
            n_class=n_class,
            pretrained_model=pretrained_model,
            input_size=input_size
        )
    elif model_name == 'pspnet_resnet50':
        model = PSPNetResNet50(
            n_class=n_class,
            pretrained_model=pretrained_model,
            input_size=input_size
        )
    elif model_name == 'segnet':
        model = SegNetBasic(
            n_class=n_class, pretrained_model=pretrained_model)
    else:
        # Fail fast instead of raising NameError on the undefined `model`.
        raise ValueError('unknown model: {}'.format(model_name))
    return dataset, label_names, model


def main():
    """Evaluate a semantic segmentation model on a benchmark dataset.

    Parses command-line options, builds the dataset/model pair, runs
    prediction over the whole dataset, and prints per-class IoU plus the
    summary metrics returned by ``eval_semantic_segmentation``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset', choices=('cityscapes', 'ade20k', 'camvid'))
    parser.add_argument(
        '--model', choices=(
            # Keep in sync with get_dataset_and_model; the original
            # omitted 'pspnet_resnet50' even though it is supported.
            'pspnet_resnet101', 'pspnet_resnet50', 'segnet'))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model,
        (args.input_size, args.input_size))

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    it = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, it, hook=ProgressHook(len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    for iu, label_name in zip(result['iou'], label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format(
        'Class average accuracy', result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format(
        'Global average accuracy', result['pixel_accuracy']))


# Run evaluation only when executed as a script, not on import.
if __name__ == '__main__':
    main()
Loading

0 comments on commit 1f3aca9

Please sign in to comment.