Skip to content
This repository was archived by the owner on Jul 2, 2021. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
110 changes: 55 additions & 55 deletions examples/semantic_segmentation/eval_semantic_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,16 @@
from chainercv.utils import ProgressHook


def get_dataset_and_model(dataset_name, model_name, pretrained_model,
input_size):
# Registry of supported semantic-segmentation models, keyed by the CLI
# model name.  Each value is a 3-tuple:
#   (model class,
#    per-dataset pretrained-model overrides — looked up by dataset name,
#      falling back to the dataset name itself,
#    default batch size used when --batchsize is not given)
models = {
    'pspnet_resnet50': (PSPNetResNet50, {}, 1),
    'pspnet_resnet101': (PSPNetResNet101, {}, 1),
    'segnet': (SegNetBasic, {}, 1),
    'deeplab_v3plus_xception65': (DeepLabV3plusXception65, {}, 1),
}


def setup(dataset, model, pretrained_model, batchsize, input_size):
dataset_name = dataset
if dataset_name == 'cityscapes':
dataset = CityscapesSemanticSegmentationDataset(
split='val', label_resolution='fine')
Expand All @@ -37,81 +45,73 @@ def get_dataset_and_model(dataset_name, model_name, pretrained_model,
dataset = VOCSemanticSegmentationDataset(split='val')
label_names = voc_semantic_segmentation_label_names

n_class = len(label_names)

if pretrained_model:
pretrained_model = pretrained_model
def eval_(out_values, rest_values):
    """Compute and print semantic-segmentation scores.

    out_values: 1-tuple holding an iterable of predicted label maps.
    rest_values: 1-tuple holding an iterable of ground-truth label maps.

    NOTE(review): `label_names` is a closure variable from the enclosing
    setup() scope (set according to the chosen dataset).
    """
    pred_labels, = out_values
    gt_labels, = rest_values

    # Per-class IoU plus aggregate metrics (mIoU, class/pixel accuracy).
    result = eval_semantic_segmentation(pred_labels, gt_labels)

    # Per-class IoU rows, then a separator and summary rows, with the
    # metric name right-aligned in a 23-character column.
    for iu, label_name in zip(result['iou'], label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format(
        'Class average accuracy', result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format(
        'Global average accuracy', result['pixel_accuracy']))

cls, pretrained_models, default_batchsize = models[model]
if pretrained_model is None:
pretrained_model = pretrained_models.get(dataset_name, dataset_name)
if input_size is None:
input_size = None
else:
pretrained_model = dataset_name
if model_name == 'pspnet_resnet101':
model = PSPNetResNet101(
n_class=n_class,
pretrained_model=pretrained_model,
input_size=input_size
)
elif model_name == 'pspnet_resnet50':
model = PSPNetResNet50(
n_class=n_class,
pretrained_model=pretrained_model,
input_size=input_size
)
elif model_name == 'segnet':
model = SegNetBasic(
n_class=n_class, pretrained_model=pretrained_model)
elif model_name == 'deeplab_v3plus_xception65':
model = DeepLabV3plusXception65(
n_class=n_class,
pretrained_model=pretrained_model,
min_input_size=input_size)

return dataset, label_names, model
input_size = (input_size, input_size)

kwargs = {
'n_class': len(label_names),
'pretrained_model': pretrained_model,
}
if model in ['pspnet_resnet50', 'pspnet_resnet101']:
kwargs.update({'input_size': input_size})
elif model == 'deeplab_v3plus_xception65':
kwargs.update({'min_input_size': input_size})
model = cls(**kwargs)

if batchsize is None:
batchsize = default_batchsize

return dataset, eval_, model, batchsize


def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
parser.add_argument(
'--model', choices=(
'pspnet_resnet101', 'segnet', 'deeplab_v3plus_xception65'))
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument('--batchsize', type=int)
parser.add_argument('--input-size', type=int, default=None)
args = parser.parse_args()

if args.input_size is None:
input_size = None
else:
input_size = (args.input_size, args.input_size)

dataset, label_names, model = get_dataset_and_model(
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model,
input_size)
args.batchsize, args.input_size)

if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()

it = iterators.SerialIterator(
dataset, 1, repeat=False, shuffle=False)
iterator = iterators.SerialIterator(
dataset, batchsize, repeat=False, shuffle=False)

in_values, out_values, rest_values = apply_to_iterator(
model.predict, it, hook=ProgressHook(len(dataset)))
model.predict, iterator, hook=ProgressHook(len(dataset)))
# Delete an iterator of images to save memory usage.
del in_values
pred_labels, = out_values
gt_labels, = rest_values

result = eval_semantic_segmentation(pred_labels, gt_labels)

for iu, label_name in zip(result['iou'], label_names):
print('{:>23} : {:.4f}'.format(label_name, iu))
print('=' * 34)
print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
print('{:>23} : {:.4f}'.format(
'Class average accuracy', result['mean_class_accuracy']))
print('{:>23} : {:.4f}'.format(
'Global average accuracy', result['pixel_accuracy']))

eval_(out_values, rest_values)


if __name__ == '__main__':
Expand Down
33 changes: 8 additions & 25 deletions examples/semantic_segmentation/eval_semantic_segmentation_multi.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,35 +5,29 @@

import chainermn

from chainercv.evaluations import eval_semantic_segmentation
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook

from eval_semantic_segmentation import get_dataset_and_model
from eval_semantic_segmentation import models
from eval_semantic_segmentation import setup


def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
parser.add_argument(
'--model', choices=(
'pspnet_resnet101', 'segnet', 'deeplab_v3plus_xception65'))
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--pretrained-model')
parser.add_argument('--batchsize', type=int)
parser.add_argument('--input-size', type=int, default=None)
args = parser.parse_args()

comm = chainermn.create_communicator('pure_nccl')
device = comm.intra_rank

if args.input_size is None:
input_size = None
else:
input_size = (args.input_size, args.input_size)

dataset, label_names, model = get_dataset_and_model(
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model,
input_size)
args.batchsize, args.input_size)

chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
Expand All @@ -43,25 +37,14 @@ def main():
return

it = iterators.MultithreadIterator(
dataset, comm.size, repeat=False, shuffle=False)
dataset, batchsize * comm.size, repeat=False, shuffle=False)

in_values, out_values, rest_values = apply_to_iterator(
model.predict, it, hook=ProgressHook(len(dataset)), comm=comm)
# Delete an iterator of images to save memory usage.
del in_values
pred_labels, = out_values
gt_labels, = rest_values

result = eval_semantic_segmentation(pred_labels, gt_labels)

for iu, label_name in zip(result['iou'], label_names):
print('{:>23} : {:.4f}'.format(label_name, iu))
print('=' * 34)
print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
print('{:>23} : {:.4f}'.format(
'Class average accuracy', result['mean_class_accuracy']))
print('{:>23} : {:.4f}'.format(
'Global average accuracy', result['pixel_accuracy']))
eval_(out_values, rest_values)


if __name__ == '__main__':
Expand Down