Commit: polish
LutaoChu committed Feb 3, 2021
1 parent 60b78a8 commit 0d8ab3d
Showing 5 changed files with 28 additions and 31 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -89,6 +89,7 @@ python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml
* [API Tutorial](https://aistudio.baidu.com/aistudio/projectdetail/1339458)
* [Data Preparation](./docs/data_prepare.md)
* [Training Configuration](./configs/)
+* [Loss Usage](./docs/loss_usage.md)
* [API References](./docs/apis)
* [Add New Components](./docs/add_new_model.md)

1 change: 1 addition & 0 deletions README_CN.md
@@ -90,6 +90,7 @@ python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml
* [API Tutorial](https://aistudio.baidu.com/aistudio/projectdetail/1339458)
* [Dataset Preparation](./docs/data_prepare.md)
* [Configuration](./configs/)
+* [Loss Usage](./docs/loss_usage.md)
* [API References](./docs/apis)
* [Add New Components](./docs/add_new_model.md)

4 changes: 4 additions & 0 deletions docs/loss_usage.md
@@ -0,0 +1,4 @@
+# Loss usage
+
+- [Lovasz loss](lovasz_loss.md)
+- To be continued
30 changes: 8 additions & 22 deletions paddleseg/models/losses/lovasz_loss.py
@@ -46,14 +46,10 @@ def forward(self, logits, labels):
        Args:
            logits: [N, C, H, W] Tensor, logits at each prediction (between -\infty and +\infty)
            labels: [N, 1, H, W] or [N, H, W] Tensor, ground truth labels (between 0 and C - 1)
        """
-       probas = F.softmax(
-           logits, axis=1
-       )  # probas grad [ 0.0000000e+00  0.0000000e+00  0.0000000e+00  0.0000000e+00 -5.5730345e-07]
-       vprobas, vlabels = flatten_probas(
-           probas, labels, self.ignore_index
-       )  # vprobas grad differs from torch: the first class is all zeros here; torch's is not (the first element of the first class is 0.05)
+       probas = F.softmax(logits, axis=1)
+       vprobas, vlabels = flatten_probas(probas, labels, self.ignore_index)
        loss = lovasz_softmax_flat(vprobas, vlabels, classes=self.classes)
        return loss
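For orientation, a minimal sketch of the streamlined forward path on toy tensors; it assumes this module's flatten_probas and lovasz_softmax_flat are in scope, and the shapes are hypothetical:

```python
import paddle
import paddle.nn.functional as F

logits = paddle.rand([2, 3, 4, 4])          # [N, C, H, W]
labels = paddle.randint(0, 3, [2, 4, 4])    # [N, H, W], values in [0, C - 1]

probas = F.softmax(logits, axis=1)          # per-pixel class probabilities
vprobas, vlabels = flatten_probas(probas, labels, 255)  # drop pixels labeled 255
loss = lovasz_softmax_flat(vprobas, vlabels, classes='present')
```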

@@ -100,10 +96,6 @@ def lovasz_grad(gt_sorted):

    if p > 1:  # cover 1-pixel case
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
-   # jaccard0 = paddle.slice(jaccard, axis=[0], starts=[0], ends=[1])
-   # jaccard1 = paddle.slice(jaccard, axis=[0], starts=[1], ends=[len_gt])
-   # jaccard2 = paddle.slice(jaccard, axis=[0], starts=[0], ends=[-1])
-   # jaccard = paddle.concat([jaccard0, jaccard1 - jaccard2], axis=0)
    return jaccard
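As a sanity check on lovasz_grad, a self-contained sketch of the standard Lovasz gradient (cumulative intersection over union, following Berman et al.'s Lovasz-Softmax formulation; the function body above this hunk is assumed to match):

```python
import paddle

def lovasz_grad_sketch(gt_sorted):
    # gt_sorted: 0/1 ground-truth indicators, sorted by descending prediction error.
    p = gt_sorted.shape[0]
    gts = paddle.sum(gt_sorted)                        # total positives
    intersection = gts - paddle.cumsum(gt_sorted, axis=0)
    union = gts + paddle.cumsum(1.0 - gt_sorted, axis=0)
    jaccard = 1.0 - intersection / union               # IoU error after k pixels
    if p > 1:  # discrete derivative, as in the hunk above
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard

g = paddle.to_tensor([1.0, 0.0, 1.0, 1.0])
print(lovasz_grad_sketch(g))  # per-position weights for the sorted errors
```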


@@ -148,14 +140,11 @@ def flatten_binary_scores(scores, labels, ignore=None):
    if ignore is None:
        return scores, labels
    valid = labels != ignore
-   # ignore = paddle.cast(ignore, 'int32')
    valid_mask = paddle.reshape(valid, (-1, 1))
    indexs = paddle.nonzero(valid_mask)
    indexs.stop_gradient = True
    vscores = paddle.gather(scores, indexs[:, 0])
    vlabels = paddle.gather(labels, indexs[:, 0])
-   # vscores = paddle.squeeze(vscores, axis=1)
-   # vlabels = paddle.squeeze(vlabels, axis=1)
    return vscores, vlabels


@@ -184,23 +173,23 @@ def lovasz_softmax_flat(probas, labels, classes='present'):
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
-       errors = paddle.abs(fg - class_pred)  # errors gradient differs from torch
+       errors = paddle.abs(fg - class_pred)
        errors_sorted, perm = paddle.fluid.core.ops.argsort(
            errors, 'axis', 0, 'descending', True)
-       errors_sorted.stop_gradient = False  # errors_sorted gradient matches torch
+       errors_sorted.stop_gradient = False

        fg_sorted = paddle.gather(fg, perm)
        fg_sorted.stop_gradient = True

-       grad = lovasz_grad(fg_sorted)  # grad values match torch; carries no gradient
+       grad = lovasz_grad(fg_sorted)
        loss = paddle.sum(errors_sorted * grad)
-       losses.append(loss)  # loss value and gradient match torch
+       losses.append(loss)

    if len(classes_to_sum) == 1:
        return losses[0]

-   losses_tensor = paddle.stack(losses)  # losses_tensor values and gradients match torch
+   losses_tensor = paddle.stack(losses)
    mean_loss = paddle.mean(losses_tensor)
    return mean_loss
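To make the per-class loop concrete, a toy call on already-flattened inputs (a sketch; it assumes the lovasz_softmax_flat defined in this module):

```python
import paddle

# Three valid pixels, three classes; probabilities already flattened to [P, C].
vprobas = paddle.to_tensor([[0.7, 0.2, 0.1],
                            [0.1, 0.8, 0.1],
                            [0.3, 0.3, 0.4]])
vlabels = paddle.to_tensor([0, 1, 2])

# 'present' averages per-class losses over the classes that appear in vlabels.
loss = lovasz_softmax_flat(vprobas, vlabels, classes='present')
print(float(loss))
```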

@@ -218,12 +207,9 @@ def flatten_probas(probas, labels, ignore=None):
    if ignore is None:
        return probas, labels
    valid = labels != ignore
-   # valid = paddle.cast(valid, 'int32')
    valid_mask = paddle.reshape(valid, [-1, 1])
    indexs = paddle.nonzero(valid_mask)
    indexs.stop_gradient = True
    vprobas = paddle.gather(probas, indexs[:, 0])
-   # print(probas.shape, vprobas.shape)  # [1789832, 20] [1700971, 20]
    vlabels = paddle.gather(labels, indexs[:, 0])
-   # vlabels = paddle.squeeze(vlabels, axis=1)
    return vprobas, vlabels
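The nonzero-plus-gather idiom above is how both flatten helpers drop ignored pixels; a minimal sketch with hypothetical shapes:

```python
import paddle

probas = paddle.rand([4, 3])                    # 4 pixels, 3 classes
labels = paddle.to_tensor([0, 255, 2, 255])     # 255 marks ignored pixels

valid = labels != 255
indexs = paddle.nonzero(paddle.reshape(valid, [-1, 1]))
indexs.stop_gradient = True                     # indices carry no gradient
vprobas = paddle.gather(probas, indexs[:, 0])   # -> [2, 3]
vlabels = paddle.gather(labels, indexs[:, 0])   # -> [2]
```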
23 changes: 14 additions & 9 deletions paddleseg/models/losses/mixed_loss.py
@@ -21,6 +21,18 @@

@manager.LOSSES.add_component
class MixedLoss(nn.Layer):
"""
Weighted computations for multiple Loss.
The advantage is that mixed loss training can be achieved without changing the networking code.
Args:
losses (list of nn.Layer): A list consisting of multiple loss classes
coef (float|int): Weighting coefficient of multiple loss
Returns:
A callable object of MixedLoss.
"""

def __init__(self, losses, coef):
super(MixedLoss, self).__init__()
if not isinstance(losses, list):
@@ -38,16 +50,9 @@ def __init__(self, losses, coef):
        self.coef = coef

    def forward(self, logits, labels):
-       final_output = 0
        loss_list = []
+       final_output = 0
        for i, loss in enumerate(self.losses):
            output = loss(logits, labels)
            final_output += output * self.coef[i]
            loss_list.append(output * self.coef[i])
-       return final_output, loss_list
-
-       # loss_list = []
-       # for i, loss in enumerate(self.losses):
-       #     output = loss(logits, labels)
-       #     loss_list.append(output * self.coef[i])
-       # return loss_list
+       return final_output
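A minimal usage sketch of the polished MixedLoss; the component loss class names (CrossEntropyLoss, LovaszSoftmaxLoss) are assumptions based on the PaddleSeg loss zoo:

```python
import paddle
from paddleseg.models.losses import MixedLoss
from paddleseg.models.losses import CrossEntropyLoss, LovaszSoftmaxLoss  # assumed names

# 0.8 * cross-entropy + 0.2 * Lovasz-Softmax, without touching network code.
mixed = MixedLoss(losses=[CrossEntropyLoss(), LovaszSoftmaxLoss()],
                  coef=[0.8, 0.2])

logits = paddle.rand([2, 19, 64, 64])           # [N, C, H, W]
labels = paddle.randint(0, 19, [2, 64, 64])     # [N, H, W]
loss = mixed(logits, labels)                    # a single weighted scalar
```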
