[core] Fix swish op and the output type of benchmark_bin #10500

Merged · 1 commit · May 13, 2024

This PR makes benchmark_bin print and save output tensors according to their actual precision (float, int32, or int64) instead of always reading them as float, and gives the swish op a default beta of 1.0f when the op description carries no "beta" attribute.
90 changes: 54 additions & 36 deletions lite/api/tools/benchmark/benchmark.cc
@@ -105,6 +105,47 @@ void RunImpl(std::shared_ptr<PaddlePredictor> predictor,
   perf_data->set_post_process_time(timer.Stop());
 }
 #endif
+template <typename T>
+void outputIdxTensor(const Tensor* output_tensor,
+                     std::stringstream& out_ss,
+                     size_t tidx) {
+  auto out_shape = output_tensor->shape();
+  auto out_data = output_tensor->data<T>();
+  auto ele_num = lite::ShapeProduction(out_shape);
+  auto out_mean = lite::compute_mean<T>(out_data, ele_num);
+  auto out_std_dev =
+      lite::compute_standard_deviation<T>(out_data, ele_num, true, out_mean);
+
+  out_ss << "output shape(NCHW): " << lite::ShapePrint(out_shape) << std::endl;
+  out_ss << "output tensor " << tidx << " elem num: " << ele_num << std::endl;
+  out_ss << "output tensor " << tidx << " mean value: " << out_mean
+         << std::endl;
+  out_ss << "output tensor " << tidx << " standard deviation: " << out_std_dev
+         << std::endl;
+
+  if (FLAGS_show_output_elem) {
+    for (int i = 0; i < ele_num; ++i) {
+      out_ss << "out[" << tidx << "][" << i
+             << "]:" << output_tensor->data<T>()[i] << std::endl;
+    }
+  }
+
+  // TODO(sprouteer): Only support float for now, add more types if needed.
+  if (!FLAGS_output_data_path.empty()) {
+    std::stringstream out_data;
+    auto output_path = lite::Split(FLAGS_output_data_path, ":");
+    if (output_path.size() <= tidx) {
+      std::cerr << "Fail to write output tensor to file, tensor_output_path "
+                   "not matching output tensor number. "
+                << std::endl;
+    } else {
+      for (int i = 0; i < ele_num; ++i) {
+        out_data << output_tensor->data<T>()[i] << std::endl;
+      }
+      StoreOutputTensor(out_data, output_path[tidx]);
+    }
+  }
+}
+
 void Run(const std::string& model_file,
          const std::vector<std::vector<int64_t>>& input_shapes) {
@@ -225,42 +266,19 @@ void Run(const std::string& model_file,
   for (size_t tidx = 0; tidx < output_tensor_num; ++tidx) {
     std::unique_ptr<const Tensor> output_tensor = predictor->GetOutput(tidx);
     out_ss << "\n--- output tensor " << tidx << " ---\n";
-    auto out_shape = output_tensor->shape();
-    auto out_data = output_tensor->data<float>();
-    auto ele_num = lite::ShapeProduction(out_shape);
-    auto out_mean = lite::compute_mean<float>(out_data, ele_num);
-    auto out_std_dev = lite::compute_standard_deviation<float>(
-        out_data, ele_num, true, out_mean);
-
-    out_ss << "output shape(NCHW): " << lite::ShapePrint(out_shape)
-           << std::endl;
-    out_ss << "output tensor " << tidx << " elem num: " << ele_num << std::endl;
-    out_ss << "output tensor " << tidx << " mean value: " << out_mean
-           << std::endl;
-    out_ss << "output tensor " << tidx << " standard deviation: " << out_std_dev
-           << std::endl;
-
-    if (FLAGS_show_output_elem) {
-      for (int i = 0; i < ele_num; ++i) {
-        out_ss << "out[" << tidx << "][" << i
-               << "]:" << output_tensor->data<float>()[i] << std::endl;
-      }
-    }
-
-    // TODO(sprouteer): Only support float for now, add more types if needed.
-    if (!FLAGS_output_data_path.empty()) {
-      std::stringstream out_data;
-      auto output_path = lite::Split(FLAGS_output_data_path, ":");
-      if (output_path.size() <= tidx) {
-        std::cerr << "Fail to write output tensor to file, tensor_output_path "
-                     "not matching output tensor number. "
-                  << std::endl;
-      } else {
-        for (int i = 0; i < ele_num; ++i) {
-          out_data << output_tensor->data<float>()[i] << std::endl;
-        }
-        StoreOutputTensor(out_data, output_path[tidx]);
-      }
+    switch (output_tensor->precision()) {
+      case PRECISION(kFloat):
+        outputIdxTensor<float>(output_tensor.get(), out_ss, tidx);
+        break;
+      case PRECISION(kInt32):
+        outputIdxTensor<int32_t>(output_tensor.get(), out_ss, tidx);
+        break;
+      case PRECISION(kInt64):
+        outputIdxTensor<int64_t>(output_tensor.get(), out_ss, tidx);
+        break;
+      default:
+        LOG(FATAL) << "outputIdxTensor unsupported precision type: "
+                   << static_cast<int>(output_tensor->precision());
     }
   }

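Editor's note: the hunk above boils down to a tag-dispatch pattern — the tensor's runtime precision selects a typed instantiation of one templated helper, so the same statistics code serves float, int32, and int64 outputs. Below is a minimal, self-contained sketch of that pattern; Precision, TensorView, Summarize, and SummarizeAny are illustrative stand-ins, not PaddleLite APIs.

#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical stand-ins for the runtime type tag and tensor handle.
enum class Precision { kFloat, kInt32, kInt64 };

struct TensorView {
  Precision precision;
  const void* data;
  size_t size;  // element count
};

// Typed worker: computes statistics once the element type is known.
template <typename T>
void Summarize(const TensorView& t, std::ostream& os) {
  const T* p = static_cast<const T*>(t.data);
  double mean = std::accumulate(p, p + t.size, 0.0) / t.size;
  os << "elem num: " << t.size << ", mean: " << mean << "\n";
}

// Runtime dispatch: mirrors the switch on output_tensor->precision().
void SummarizeAny(const TensorView& t, std::ostream& os) {
  switch (t.precision) {
    case Precision::kFloat: Summarize<float>(t, os); break;
    case Precision::kInt32: Summarize<int32_t>(t, os); break;
    case Precision::kInt64: Summarize<int64_t>(t, os); break;
  }
}

int main() {
  std::vector<int64_t> v{1, 2, 3, 4};
  SummarizeAny({Precision::kInt64, v.data(), v.size()}, std::cout);
  // prints: elem num: 4, mean: 2.5
}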
6 changes: 5 additions & 1 deletion lite/operators/activation_ops.cc
@@ -51,7 +51,11 @@ bool ActivationOp::AttachImpl(const cpp::OpDesc& opdesc, lite::Scope* scope) {
         scope->FindVar(prelu_alpha_name)->GetMutable<lite::Tensor>();
     param_.active_type = lite_api::ActivationType::kPRelu;
   } else if (opdesc.Type() == "swish") {
-    param_.Swish_beta = opdesc.GetAttr<float>("beta");
+    if (opdesc.HasAttr("beta")) {
+      param_.Swish_beta = opdesc.GetAttr<float>("beta");
+    } else {
+      param_.Swish_beta = 1.0f;
+    }
     param_.active_type = lite_api::ActivationType::kSwish;
   } else if (opdesc.Type() == "hard_sigmoid") {
     param_.active_type = lite_api::ActivationType::kHardSigmoid;
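Editor's note on the operator fix: swish is defined as swish(x) = x * sigmoid(beta * x). The old code called GetAttr<float>("beta") unconditionally, so a model exported without that attribute could not attach the op; the patch falls back to beta = 1.0, which reduces to the common x * sigmoid(x) form. A minimal sketch of the math under that default (not the PaddleLite kernel):

#include <cmath>
#include <iostream>

// swish(x) = x * sigmoid(beta * x) = x / (1 + exp(-beta * x));
// beta defaults to 1.0f, matching the fallback this patch introduces.
float Swish(float x, float beta = 1.0f) {
  return x / (1.0f + std::exp(-beta * x));
}

int main() {
  std::cout << Swish(2.0f) << "\n";        // default beta = 1 -> ~1.76159
  std::cout << Swish(2.0f, 0.5f) << "\n";  // explicit beta = 0.5 -> ~1.46211
}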