[OTE][MPA] Apply ignored_labels to C-IL for semantic segmentation (#1134)

* add ignored labels to use external ignored labels

* update mpa submodule

* apply ignored_labels in sample

* fix flake8 issues

* update mpa submodule

* update mpa submodule

* update mpa submodule

* update mpa submodule

* move label_idx creation to init
chuneuny-emily authored Jun 16, 2022
1 parent aaab5de commit 60ae375
Showing 3 changed files with 47 additions and 37 deletions.
File 1 of 3:
@@ -85,6 +85,7 @@ class _DataInfoProxy:
     def __init__(self, ote_dataset, labels=None):
         self.ote_dataset = ote_dataset
         self.labels = labels
+        self.label_idx = {label.id: i for i, label in enumerate(labels)}
 
     def __len__(self):
         return len(self.ote_dataset)
@@ -95,15 +96,16 @@ def __getitem__(self, index):
         :return data_info: dictionary that contains the image and image metadata, as well as the labels of
         the objects in the image
         """
-
         dataset = self.ote_dataset
         item = dataset[index]
+        ignored_labels = np.array([self.label_idx[lbs.id] + 1 for lbs in item.ignored_labels])
 
         data_info = dict(dataset_item=item,
                          width=item.width,
                          height=item.height,
                          index=index,
-                         ann_info=dict(labels=self.labels))
+                         ann_info=dict(labels=self.labels),
+                         ignored_labels=ignored_labels)
 
         return data_info
 
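For readers skimming the diff, here is a minimal, self-contained sketch of what the two additions above do together. The SimpleNamespace stand-ins below substitute for the OTE LabelEntity and DatasetItemEntity objects and are not code from the repository; the remark about the background class is an assumption.

    import numpy as np
    from types import SimpleNamespace

    # Stand-ins for LabelEntity objects: only the .id attribute matters here.
    labels = [SimpleNamespace(id='a'), SimpleNamespace(id='b'), SimpleNamespace(id='c')]

    # Mirrors the new __init__ line: map label id -> position in the label list.
    label_idx = {label.id: i for i, label in enumerate(labels)}

    # Stand-in for a dataset item that asks for label 'c' to be ignored.
    item = SimpleNamespace(ignored_labels=[labels[2]])

    # Mirrors the new __getitem__ line; the +1 presumably shifts past an implicit
    # background class at index 0 of the segmentation map.
    ignored_labels = np.array([label_idx[lbs.id] + 1 for lbs in item.ignored_labels])
    print(ignored_labels)  # -> [3]

The resulting indices travel inside data_info, so downstream MPA code can, presumably, exclude those classes when computing the loss for that particular sample.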
File 2 of 3:
@@ -277,6 +277,8 @@ def patch_color_conversion(pipeline):
                 elif pipeline_step.type == 'LoadAnnotations':
                     pipeline_step.type = 'LoadAnnotationFromOTEDataset'
                     pipeline_step.domain = domain
+                if subset == 'train' and pipeline_step.type == 'Collect':
+                    pipeline_step = BaseTask._get_meta_keys(pipeline_step)
             patch_color_conversion(cfg.pipeline)
 
     @staticmethod
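The Collect patch above calls BaseTask._get_meta_keys, whose body is not part of this diff. As a rough, hypothetical sketch of the idea only (assuming the Collect step behaves like a dict carrying a meta_keys entry; the real helper may well differ), it would need to make sure ignored_labels survives the Collect transform:

    # Hypothetical sketch; BaseTask._get_meta_keys itself is not shown in this commit.
    def _get_meta_keys(pipeline_step):
        """Assumed behaviour: add 'ignored_labels' to the Collect step's meta_keys
        so the value attached by _DataInfoProxy is carried through the data pipeline."""
        meta_keys = list(pipeline_step.get('meta_keys', []))
        if 'ignored_labels' not in meta_keys:
            meta_keys.append('ignored_labels')
        pipeline_step['meta_keys'] = tuple(meta_keys)
        return pipeline_step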
@@ -328,7 +330,7 @@ def train(self,
               output_model: ModelEntity,
               train_parameters: Optional[TrainParameters] = None):
         logger.info('train()')
-        # Check for stop signal between pre-eval and training.
+        # Check for stop signal between pre-eval and training.
         # If training is cancelled at this point,
         if self._should_stop:
             logger.info('Training cancelled.')
@@ -350,7 +352,7 @@ def train(self,
         self._is_training = True
         results = self._run_task(stage_module, mode='train', dataset=dataset, parameters=train_parameters)
 
-        # Check for stop signal when training has stopped.
+        # Check for stop signal when training has stopped.
         # If should_stop is true, training was cancelled and no new
         if self._should_stop:
             logger.info('Training cancelled.')
File 3 of 3:
@@ -68,9 +68,14 @@ def gen_rect_image(resolution):
         LabelEntity(name='rect', domain=Domain.SEGMENTATION, id=2),
     ]
 
-    def get_image(subset, label_id):
+    def get_image(type, subset, label_id):
+        ignored_labels = []
         if label_id == 1:
             image, gt = gen_circle_image((640, 480))
+            if type == 'new' and subset == Subset.TRAINING:
+                ignored_labels = [
+                    LabelEntity(name='rect', domain=Domain.SEGMENTATION, id=2)
+                ]
         else:
             image, gt = gen_rect_image((640, 480))
 
@@ -99,50 +104,51 @@ def get_image(subset, label_id):
                 kind=AnnotationSceneKind.ANNOTATION
             ),
             subset=subset,
+            ignored_labels=ignored_labels,
         )
 
     old_train = [
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
+        get_image('old', Subset.TRAINING, 1),
     ]
 
     old_val = [
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
+        get_image('old', Subset.VALIDATION, 1),
    ]
 
     new_train = [
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 1),
-        get_image(Subset.TRAINING, 2),
-        get_image(Subset.TRAINING, 2),
-        get_image(Subset.TRAINING, 2),
-        get_image(Subset.TRAINING, 2),
+        get_image('new', Subset.TRAINING, 1),
+        get_image('new', Subset.TRAINING, 1),
+        get_image('new', Subset.TRAINING, 1),
+        get_image('new', Subset.TRAINING, 1),
+        get_image('new', Subset.TRAINING, 2),
+        get_image('new', Subset.TRAINING, 2),
+        get_image('new', Subset.TRAINING, 2),
+        get_image('new', Subset.TRAINING, 2),
     ]
 
     new_val = [
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 1),
-        get_image(Subset.VALIDATION, 2),
-        get_image(Subset.VALIDATION, 2),
-        get_image(Subset.VALIDATION, 2),
-        get_image(Subset.VALIDATION, 2),
+        get_image('new', Subset.VALIDATION, 1),
+        get_image('new', Subset.VALIDATION, 1),
+        get_image('new', Subset.VALIDATION, 1),
+        get_image('new', Subset.VALIDATION, 1),
+        get_image('new', Subset.VALIDATION, 2),
+        get_image('new', Subset.VALIDATION, 2),
+        get_image('new', Subset.VALIDATION, 2),
+        get_image('new', Subset.VALIDATION, 2),
     ]
     old = old_train + old_val
     new = new_train + new_val
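Putting the updated fixture together with the _DataInfoProxy change from the first file: a 'new'-task training item ignores the 'rect' label (id=2); assuming the fixture's label list is [circle, rect], the proxy should report segmentation class index 2 for that sample. A tiny illustration with stand-in objects (not the actual OTE entities):

    import numpy as np
    from types import SimpleNamespace

    circle, rect = SimpleNamespace(id=1), SimpleNamespace(id=2)          # stand-ins for the test's LabelEntity objects
    label_idx = {label.id: i for i, label in enumerate([circle, rect])}  # {1: 0, 2: 1}

    new_train_item = SimpleNamespace(ignored_labels=[rect])              # like get_image('new', Subset.TRAINING, 1)
    print(np.array([label_idx[lbs.id] + 1
                    for lbs in new_train_item.ignored_labels]))          # -> [2]

Validation items and every 'old' item keep an empty ignored_labels list, so only the new-task training samples are affected.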
