-
Notifications
You must be signed in to change notification settings - Fork 5.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add triplet loss for metric learning. #11461
Changes from all commits
4996368
f8f284d
818cb32
175f441
763d2a1
6cdfdeb
30325a6
5327292
09621ea
98d6812
b7761e9
e34667d
3a1db7e
320cecb
dd83a6d
8486164
d05743f
7123772
2d3e41a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,163 @@ | ||
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
#include "paddle/fluid/operators/dense_triplet_loss_op.h" | ||
|
||
namespace paddle { | ||
namespace operators { | ||
|
||
template <> | ||
std::vector<int> GetOffsets<platform::CPUDeviceContext>(const Tensor* t) { | ||
std::vector<int> offsets; | ||
offsets.push_back(0); | ||
const int64_t* data = t->data<int64_t>(); | ||
int64_t currrent_value = data[0]; | ||
for (int i = 1; i < t->numel(); ++i) { | ||
if (data[i] != currrent_value) { | ||
offsets.push_back(i); | ||
} | ||
currrent_value = data[i]; | ||
} | ||
offsets.push_back(t->numel()); | ||
return offsets; | ||
} | ||
|
||
// Declares the inputs, outputs, attributes, and documentation of the
// dense_triplet_loss operator.
class DenseTripletLossOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Logits",
             "(Tensor, default: Tensor<float>), A 2-D tensor with shape [N x "
             "K]. N is the total number of samples, "
             "and K is the feature length in each sample.");
    AddInput("Label",
             "(Tensor) The ground truth which is a 2-D tensor. "
             "Label is a Tensor<int64> with shape [N x 1]. ");
    AddOutput("Loss",
              "(Tensor, default: Tensor<float>), A 2-D tensor. The triplet "
              "loss with shape [N x 1].");
    // LogitsGrad is produced in the forward pass and consumed by the backward
    // op, so it is marked as an intermediate output.
    AddOutput("LogitsGrad",
              "(Tensor, default: Tensor<float>), A temporary "
              "output Tensor to store the gradients of triplet loss, which is "
              "computed with loss together in one call. It is a 2-D Tensor of "
              "the shape [N, feature_len].")
        .AsIntermediate();
    AddAttr<float>("margin",
                   "(float), The minimum margin between two samples.");
    AddComment(R"DOC(
The Input(Logits) is the embeddings of the input samples. The triplet loss is
defined over triplets of embeddings. A triplet contains three samples:

- an anchor sample
- a positive sample with the same class as the anchor
- a negative sample with a different class

We denote the three samples as $a$, $p$, $n$. The loss of the triplet
$(a, p, n)$ is:

$$ L = max(d(a, p) - d(a, n) + margin, 0) $$

in which $d(a, p)$ is the distance between $a$ and $p$. The negative sample
should be farther away from the anchor than the positive sample by at least
$margin$. The dense triplet loss gathers all triplets in a batch and
accumulates the loss over them.

)DOC");
  }
};
|
||
class DenseTripletLossOp : public framework::OperatorWithKernel { | ||
public: | ||
using framework::OperatorWithKernel::OperatorWithKernel; | ||
|
||
void InferShape(framework::InferShapeContext* ctx) const override { | ||
PADDLE_ENFORCE(ctx->HasInput("Logits"), | ||
"Input(Logits) should be not null."); | ||
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); | ||
PADDLE_ENFORCE(ctx->HasOutput("Loss"), "Output(Loss) should be not null."); | ||
PADDLE_ENFORCE(ctx->HasOutput("LogitsGrad"), | ||
"Output(LogitsGrad) should be not null."); | ||
auto labels_dims = ctx->GetInputDim("Label"); | ||
auto logits_dims = ctx->GetInputDim("Logits"); | ||
PADDLE_ENFORCE_EQ( | ||
logits_dims.size(), 2UL, | ||
"The input of dense_triplet_loss should be a 2-D tensor."); | ||
PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL, | ||
"The labels should be a 2-D tensor."); | ||
PADDLE_ENFORCE_EQ(labels_dims[1], 1UL, | ||
"The 2nd dimension of " | ||
"Input(Label) should be 1."); | ||
ctx->SetOutputDim("Loss", {logits_dims[0], 1}); | ||
ctx->SetOutputDim("LogitsGrad", logits_dims); | ||
ctx->ShareLoD("Logits", /*->*/ "Loss"); | ||
} | ||
|
||
protected: | ||
framework::OpKernelType GetExpectedKernelType( | ||
const framework::ExecutionContext& ctx) const override { | ||
return framework::OpKernelType( | ||
framework::ToDataType(ctx.Input<Tensor>("Logits")->type()), | ||
ctx.device_context()); | ||
} | ||
}; | ||
|
||
class DenseTripletLossGradOp : public framework::OperatorWithKernel { | ||
public: | ||
using framework::OperatorWithKernel::OperatorWithKernel; | ||
|
||
void InferShape(framework::InferShapeContext* ctx) const override { | ||
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), | ||
"Input(Loss@Grad) should not be null."); | ||
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); | ||
PADDLE_ENFORCE(ctx->HasInput("LogitsGrad"), | ||
"Input(LogitsGrad) should be not null."); | ||
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")), | ||
"Output(Logits@Grad) should be not null."); | ||
|
||
auto labels_dims = ctx->GetInputDim("Label"); | ||
PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL, | ||
"The labels should be a 2-D tensor."); | ||
|
||
PADDLE_ENFORCE_EQ(labels_dims[1], 1UL, | ||
"the 2nd dimension of Input(Label) should be 1."); | ||
|
||
ctx->SetOutputDim(framework::GradVarName("Logits"), | ||
ctx->GetInputDim("LogitsGrad")); | ||
} | ||
|
||
protected: | ||
framework::OpKernelType GetExpectedKernelType( | ||
const framework::ExecutionContext& ctx) const override { | ||
return framework::OpKernelType( | ||
framework::ToDataType( | ||
ctx.Input<Tensor>(framework::GradVarName("Loss"))->type()), | ||
ctx.device_context()); | ||
} | ||
}; | ||
|
||
} // namespace operators | ||
} // namespace paddle | ||
|
||
namespace ops = paddle::operators;

// Register the forward op with its proto maker; the gradient op description
// is generated automatically by the default grad-op-desc maker.
REGISTER_OPERATOR(dense_triplet_loss, ops::DenseTripletLossOp,
                  ops::DenseTripletLossOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);

REGISTER_OPERATOR(dense_triplet_loss_grad, ops::DenseTripletLossGradOp);

// CPU kernels for float and double element types.
REGISTER_OP_CPU_KERNEL(
    dense_triplet_loss,
    ops::DenseTripletLossKernel<paddle::platform::CPUDeviceContext, float>,
    ops::DenseTripletLossKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    dense_triplet_loss_grad,
    ops::DenseTripletLossGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::DenseTripletLossGradKernel<paddle::platform::CPUDeviceContext,
                                    double>);
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,52 @@ | ||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. */ | ||
|
||
#define EIGEN_USE_GPU | ||
|
||
#include "paddle/fluid/operators/dense_triplet_loss_op.h" | ||
|
||
namespace paddle { | ||
namespace operators { | ||
|
||
template <> | ||
std::vector<int> GetOffsets<platform::CUDADeviceContext>(const Tensor* t) { | ||
framework::Tensor t_cpu; | ||
framework::TensorCopy(*t, platform::CPUPlace(), &t_cpu); | ||
std::vector<int> offsets; | ||
offsets.push_back(0); | ||
int64_t* data = t_cpu.data<int64_t>(); | ||
int64_t currrent_value = data[0]; | ||
for (int i = 1; i < t->numel(); ++i) { | ||
if (data[i] != currrent_value) { | ||
offsets.push_back(i); | ||
} | ||
currrent_value = data[i]; | ||
} | ||
offsets.push_back(t->numel()); | ||
return offsets; | ||
} | ||
|
||
} // namespace operators | ||
} // namespace paddle | ||
|
||
namespace ops = paddle::operators;
// CUDA kernels for float and double element types.
REGISTER_OP_CUDA_KERNEL(
    dense_triplet_loss,
    ops::DenseTripletLossKernel<paddle::platform::CUDADeviceContext, float>,
    ops::DenseTripletLossKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    dense_triplet_loss_grad,
    ops::DenseTripletLossGradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::DenseTripletLossGradKernel<paddle::platform::CUDADeviceContext,
                                    double>);
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
这里要求Label是按index排过序的吗?