
[NNAdapter] Add 35 unit tests for model validation from PaddleClas, PaddleDetection, PaddleSeg, PaddleOCR etc. #8401

Merged on Feb 28, 2022 · 11 commits
33 changes: 24 additions & 9 deletions cmake/lite.cmake
@@ -1,22 +1,37 @@
set(LITE_URL "http://paddle-inference-dist.bj.bcebos.com" CACHE STRING "inference download url")

function(lite_download_and_uncompress INSTALL_DIR URL FILENAME)
-  message(STATUS "Download inference test stuff: ${FILENAME}")
-  string(REGEX REPLACE "[-%.]" "_" FILENAME_EX ${FILENAME})
-  set(EXTERNAL_PROJECT_NAME "extern_lite_download_${FILENAME_EX}")
-  set(UNPACK_DIR "${INSTALL_DIR}/src/${EXTERNAL_PROJECT_NAME}")
-  ExternalProject_Add(
+  set(options "")
+  set(oneValueArgs MODEL_PATH)
+  set(multiValueArgs "")
+  cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+  if(DEFINED args_MODEL_PATH)
+    set(FILE_PATH ${args_MODEL_PATH}/${FILENAME})
+    set(PREFIX ${INSTALL_DIR}/${args_MODEL_PATH})
+    set(DOWNLOAD_DIR ${INSTALL_DIR}/${args_MODEL_PATH})
+  else()
+    set(FILE_PATH ${FILENAME})
+    set(PREFIX ${INSTALL_DIR})
+    set(DOWNLOAD_DIR ${INSTALL_DIR})
+  endif()
+
+  message(STATUS "Download inference test stuff: ${FILE_PATH}")
+  string(REGEX REPLACE "[-%./]" "_" FILENAME_EX ${FILE_PATH})
+  set(EXTERNAL_PROJECT_NAME "extern_lite_download_${FILENAME_EX}")
+  set(UNPACK_DIR "${INSTALL_DIR}/src/${EXTERNAL_PROJECT_NAME}")
+  ExternalProject_Add(
    ${EXTERNAL_PROJECT_NAME}
    ${EXTERNAL_PROJECT_LOG_ARGS}
-    PREFIX ${INSTALL_DIR}
-    DOWNLOAD_COMMAND wget --no-check-certificate -q -O ${INSTALL_DIR}/${FILENAME} ${URL}/${FILENAME} && ${CMAKE_COMMAND} -E tar xzf ${INSTALL_DIR}/${FILENAME} && rm -f ${INSTALL_DIR}/${FILENAME}
-    DOWNLOAD_DIR ${INSTALL_DIR}
+    PREFIX ${PREFIX}
+    DOWNLOAD_COMMAND wget --no-check-certificate -q -O ${INSTALL_DIR}/${FILE_PATH} ${URL}/${FILE_PATH} && ${CMAKE_COMMAND} -E tar xzf ${INSTALL_DIR}/${FILE_PATH} && rm -f ${INSTALL_DIR}/${FILE_PATH}
+    DOWNLOAD_DIR ${DOWNLOAD_DIR}
    DOWNLOAD_NO_PROGRESS 1
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    UPDATE_COMMAND ""
    INSTALL_COMMAND ""
-  )
+  )
endfunction()

function (lite_deps TARGET)
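For illustration, hedged call sites for the extended lite_download_and_uncompress helper; the directory variable and archive name below are hypothetical, not taken from this PR:

# Without MODEL_PATH the behavior is unchanged: fetch
# ${LITE_URL}/model.tar.gz and unpack it directly under the install dir.
lite_download_and_uncompress(${MODELS_DIR} ${LITE_URL} "model.tar.gz")

# With MODEL_PATH the extra path segment is appended to both the remote
# URL and the local directory, so this fetches
# ${LITE_URL}/nnadapter_models/model.tar.gz and unpacks it under
# ${MODELS_DIR}/nnadapter_models.
lite_download_and_uncompress(${MODELS_DIR} ${LITE_URL} "model.tar.gz"
                             MODEL_PATH "nnadapter_models")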
130 changes: 90 additions & 40 deletions lite/CMakeLists.txt

Large diffs are not rendered by default.

72 changes: 58 additions & 14 deletions lite/tests/api/CMakeLists.txt

Large diffs are not rendered by default.

6 changes: 1 addition & 5 deletions lite/tests/api/COCO2017_utility.h
@@ -48,17 +48,13 @@ std::vector<std::vector<T>> ReadRawData(
std::ifstream fin(raw_data_file_dir, std::ios::in | std::ios::binary);
CHECK(fin.is_open()) << "failed to open file " << raw_data_file_dir;
-fin.seekg(0, std::ios::end);
-int file_size = fin.tellg();
fin.seekg(0, std::ios::beg);
-CHECK_EQ(static_cast<size_t>(file_size),
-         static_cast<size_t>(image_size) * sizeof(T) / sizeof(char));
-fin.read(reinterpret_cast<char*>(data), file_size);
+fin.read(reinterpret_cast<char*>(data), image_size * sizeof(T));
Collaborator:

Why was this check removed?

Collaborator (Author):

Because the newly added detection models take inputs with a variety of shapes, while the raw data in the current dataset is fixed at 608x608. We plan to rewrite this later to read images directly, with each model's unit test doing its own resize and preprocessing. For now the goal for the detection models is just to run end to end; accuracy is not verified yet. That is why the check was removed.

fin.close();
data += image_size;
}
raw_data.emplace_back(one_iter_raw_data);
}

return raw_data;
}

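To make the review exchange above concrete, here is a minimal sketch of the read path after this change (names follow the surrounding ReadRawData function). Dropping the CHECK_EQ lets a 608x608 raw dump be consumed by a model whose image_size is smaller, since only the requested bytes are read; the flip side is that a file shorter than the request is no longer caught unless the stream is inspected. The trailing guard is an editor-added suggestion, not part of this PR:

// Sketch: read exactly image_size elements of T and ignore any
// trailing bytes in the file (the old whole-file size CHECK is gone).
std::ifstream fin(raw_data_file_dir, std::ios::in | std::ios::binary);
CHECK(fin.is_open()) << "failed to open file " << raw_data_file_dir;
fin.read(reinterpret_cast<char*>(data), image_size * sizeof(T));
// Editor's suggestion (not in the PR): detect short files explicitly.
CHECK_EQ(fin.gcount(),
         static_cast<std::streamsize>(image_size * sizeof(T)))
    << "short read from " << raw_data_file_dir;
fin.close();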
125 changes: 125 additions & 0 deletions lite/tests/api/test_bisenet_fp32_v2_3_nnadapter.cc
@@ -0,0 +1,125 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include <vector>
#include "lite/api/paddle_api.h"
#include "lite/api/test/lite_api_test_helper.h"
#include "lite/api/test/test_helper.h"
#include "lite/tests/api/utility.h"
#include "lite/utils/string.h"

DEFINE_string(data_dir, "", "data dir");
DEFINE_int32(iteration, 1, "iteration times to run");

namespace paddle {
namespace lite {

TEST(bisenet, test_bisenet_fp32_v2_3_nnadapter) {
std::vector<std::string> nnadapter_device_names;
std::string nnadapter_context_properties;
std::vector<paddle::lite_api::Place> valid_places;
valid_places.push_back(
lite_api::Place{TARGET(kNNAdapter), PRECISION(kFloat)});
#if defined(LITE_WITH_ARM)
valid_places.push_back(lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
#elif defined(LITE_WITH_X86)
valid_places.push_back(lite_api::Place{TARGET(kX86), PRECISION(kFloat)});
#else
LOG(INFO) << "Unsupported host arch!";
return;
#endif
#if defined(NNADAPTER_WITH_HUAWEI_ASCEND_NPU)
nnadapter_device_names.emplace_back("huawei_ascend_npu");
nnadapter_context_properties = "HUAWEI_ASCEND_NPU_SELECTED_DEVICE_IDS=0";
#else
LOG(INFO) << "Unsupported NNAdapter device!";
return;
#endif
std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor = nullptr;
// Use the full api with CxxConfig to generate the optimized model
lite_api::CxxConfig cxx_config;
cxx_config.set_model_dir(FLAGS_model_dir);
cxx_config.set_valid_places(valid_places);
cxx_config.set_nnadapter_device_names(nnadapter_device_names);
cxx_config.set_nnadapter_context_properties(nnadapter_context_properties);
predictor = lite_api::CreatePaddlePredictor(cxx_config);
predictor->SaveOptimizedModel(FLAGS_model_dir,
paddle::lite_api::LiteModelType::kNaiveBuffer);
// Use the light api with MobileConfig to load and run the optimized model
paddle::lite_api::MobileConfig mobile_config;
mobile_config.set_model_from_file(FLAGS_model_dir + ".nb");
mobile_config.set_threads(FLAGS_threads);
mobile_config.set_power_mode(
static_cast<lite_api::PowerMode>(FLAGS_power_mode));
mobile_config.set_nnadapter_device_names(nnadapter_device_names);
mobile_config.set_nnadapter_context_properties(nnadapter_context_properties);
predictor = paddle::lite_api::CreatePaddlePredictor(mobile_config);

std::string input_data_dir =
FLAGS_data_dir + std::string("/bisenet_input.txt");
std::string output_data_dir =
FLAGS_data_dir + std::string("/bisenet_output.txt");
std::vector<std::vector<std::vector<uint8_t>>> input_data_set;
std::vector<std::vector<std::vector<int64_t>>> input_data_set_shapes;
LoadSpecificData(input_data_dir,
input_data_set,
input_data_set_shapes,
predictor,
"input");
std::vector<std::vector<std::vector<uint8_t>>> output_data_set;
std::vector<std::vector<std::vector<int64_t>>> output_data_set_shapes;
LoadSpecificData(output_data_dir,
output_data_set,
output_data_set_shapes,
predictor,
"output");

FLAGS_warmup = 1;
for (int i = 0; i < FLAGS_warmup; i++) {
FillModelInput(input_data_set[i], input_data_set_shapes[i], predictor);
predictor->Run();
}

double cost_time = 0;
std::vector<std::vector<float>> results;
for (int i = 0; i < FLAGS_iteration; i++) {
FillModelInput(input_data_set[i], input_data_set_shapes[i], predictor);

double start = GetCurrentUS();
predictor->Run();
cost_time += (GetCurrentUS() - start);

std::vector<float> abs_error;
GetModelOutputAndAbsError(
predictor, output_data_set[i], output_data_set_shapes[i], abs_error);
results.push_back(abs_error);
}

for (float abs_error : {1e-0, 1e-1, 1e-2}) {
float acc = CalOutAccuracy(results, abs_error);
LOG(INFO) << "acc: " << acc << ", if abs_error < " << abs_error;
ASSERT_GE(acc, 0.99);
}

LOG(INFO) << "================== Speed Report ===================";
LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
<< ", warmup: " << FLAGS_warmup
<< ", iteration: " << FLAGS_iteration << ", spend "
<< cost_time / FLAGS_iteration / 1000.0 << " ms in average.";
}

} // namespace lite
} // namespace paddle
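For reference, a hedged sketch of the accuracy gate used above. This assumes CalOutAccuracy(results, abs_error) returns the fraction of recorded absolute errors that fall below the threshold; the actual helper lives in lite/tests/api/utility.h and may differ in detail:

// Hypothetical re-implementation, not the actual utility.h code.
// `results` holds one vector of absolute errors per iteration
// (as produced by GetModelOutputAndAbsError above).
float CalOutAccuracySketch(const std::vector<std::vector<float>>& results,
                           const float abs_error) {
  size_t right_count = 0;
  size_t total_count = 0;
  for (const auto& iter_errors : results) {
    for (float err : iter_errors) {
      if (err < abs_error) right_count++;
      total_count++;
    }
  }
  return total_count == 0
             ? 0.f
             : static_cast<float>(right_count) / total_count;
}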
@@ -29,7 +29,7 @@ namespace paddle {
namespace lite {

TEST(ch_ppocr_mobile_v2_0_det,
-     test_ch_ppocr_mobile_v2_0_det_fp32_v2_0_nnadapter) {
+     test_ch_ppocr_mobile_v2_0_det_fp32_v2_3_nnadapter) {
std::vector<std::string> nnadapter_device_names;
std::string nnadapter_context_properties;
std::vector<paddle::lite_api::Place> valid_places;
@@ -29,7 +29,7 @@ namespace paddle {
namespace lite {

TEST(ch_ppocr_mobile_v2_0_rec,
-     test_ch_ppocr_mobile_v2_0_rec_fp32_v2_0_nnadapter) {
+     test_ch_ppocr_mobile_v2_0_rec_fp32_v2_3_nnadapter) {
std::vector<std::string> nnadapter_device_names;
std::string nnadapter_context_properties;
std::vector<paddle::lite_api::Place> valid_places;
141 changes: 141 additions & 0 deletions lite/tests/api/test_ch_ppocr_server_v2_0_det_fp32_v2_3_nnadapter.cc
@@ -0,0 +1,141 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include <vector>
#include "lite/api/paddle_api.h"
#include "lite/api/test/lite_api_test_helper.h"
#include "lite/api/test/test_helper.h"
#include "lite/tests/api/ocr_data_utility.h"
#include "lite/tests/api/utility.h"
#include "lite/utils/string.h"

DEFINE_string(data_dir, "", "data dir");
DEFINE_int32(iteration, 10, "iteration times to run");

namespace paddle {
namespace lite {

TEST(ch_ppocr_server_v2_0_det,
test_ch_ppocr_server_v2_0_det_fp32_v2_3_nnadapter) {
std::vector<std::string> nnadapter_device_names;
std::string nnadapter_context_properties;
std::vector<paddle::lite_api::Place> valid_places;
valid_places.push_back(
lite_api::Place{TARGET(kNNAdapter), PRECISION(kFloat)});
#if defined(LITE_WITH_ARM)
valid_places.push_back(lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
#elif defined(LITE_WITH_X86)
valid_places.push_back(lite_api::Place{TARGET(kX86), PRECISION(kFloat)});
#else
LOG(INFO) << "Unsupported host arch!";
return;
#endif
#if defined(NNADAPTER_WITH_HUAWEI_ASCEND_NPU)
nnadapter_device_names.emplace_back("huawei_ascend_npu");
nnadapter_context_properties = "HUAWEI_ASCEND_NPU_SELECTED_DEVICE_IDS=0";
#else
LOG(INFO) << "Unsupported NNAdapter device!";
return;
#endif
std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor = nullptr;
// Use the full api with CxxConfig to generate the optimized model
lite_api::CxxConfig cxx_config;
cxx_config.set_model_dir(FLAGS_model_dir);
cxx_config.set_valid_places(valid_places);
cxx_config.set_nnadapter_device_names(nnadapter_device_names);
cxx_config.set_nnadapter_context_properties(nnadapter_context_properties);
predictor = lite_api::CreatePaddlePredictor(cxx_config);
predictor->SaveOptimizedModel(FLAGS_model_dir,
paddle::lite_api::LiteModelType::kNaiveBuffer);
// Use the light api with MobileConfig to load and run the optimized model
paddle::lite_api::MobileConfig mobile_config;
mobile_config.set_model_from_file(FLAGS_model_dir + ".nb");
mobile_config.set_threads(FLAGS_threads);
mobile_config.set_power_mode(
static_cast<lite_api::PowerMode>(FLAGS_power_mode));
mobile_config.set_nnadapter_device_names(nnadapter_device_names);
mobile_config.set_nnadapter_context_properties(nnadapter_context_properties);
predictor = paddle::lite_api::CreatePaddlePredictor(mobile_config);

std::string raw_data_dir =
FLAGS_data_dir + std::string("/ICDAR_2015_50/raw_data");
std::string out_data_dir =
FLAGS_data_dir +
std::string("/ICDAR_2015_50/ch_ppocr_mobile_v2_0_out_data");
std::string images_shape_path =
FLAGS_data_dir + std::string("/ICDAR_2015_50/images_shape.txt");

auto input_lines = ReadLines(images_shape_path);
std::vector<std::string> input_names;
std::vector<std::vector<int64_t>> input_shapes;
for (auto line : input_lines) {
input_names.push_back(Split(line, ":")[0]);
input_shapes.push_back(Split<int64_t>(Split(line, ":")[1], " "));
}

std::vector<std::vector<float>> raw_data;
std::vector<std::vector<float>> gt_data;
for (size_t i = 0; i < FLAGS_iteration; i++) {
raw_data.push_back(
ReadRawData(raw_data_dir, input_names[i], input_shapes[i]));
}

FLAGS_warmup = 1;
for (int i = 0; i < FLAGS_warmup; ++i) {
fill_tensor(predictor, 0, raw_data[i].data(), input_shapes[i]);
predictor->Run();
}

double cost_time = 0;
std::vector<std::vector<float>> results;
for (size_t i = 0; i < raw_data.size(); ++i) {
fill_tensor(predictor, 0, raw_data[i].data(), input_shapes[i]);
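    // Note: this first Run() is untimed; presumably it warms the device
    // for the new input shape, since only the second Run() is measured.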
predictor->Run();

double start = GetCurrentUS();
predictor->Run();
cost_time += (GetCurrentUS() - start);

auto output_tensor = predictor->GetOutput(0);
auto output_shape = output_tensor->shape();
auto output_data = output_tensor->data<float>();
ASSERT_EQ(output_shape.size(), 4UL);

int64_t output_size = 1;
for (auto dim : output_shape) {
output_size *= dim;
}
std::vector<float> ret(output_size);
memcpy(ret.data(), output_data, sizeof(float) * output_size);
results.push_back(ret);
gt_data.push_back(ReadRawData(out_data_dir, input_names[i], output_shape));
}

for (float abs_error : {1e-1, 1e-2, 1e-3, 1e-4}) {
float acc = CalOutAccuracy(results, gt_data, abs_error);
LOG(INFO) << "acc: " << acc << ", if abs_error < " << abs_error;
    ASSERT_GE(acc, 0.99);
}

LOG(INFO) << "================== Speed Report ===================";
LOG(INFO) << "Model: " << FLAGS_model_dir << ", threads num " << FLAGS_threads
<< ", warmup: " << FLAGS_warmup
<< ", iteration: " << FLAGS_iteration << ", spend "
<< cost_time / FLAGS_iteration / 1000.0 << " ms in average.";
}

} // namespace lite
} // namespace paddle
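As a closing note, the images_shape.txt parsing in the server-det test above implies a simple name:dims line format. A standalone sketch under that assumption (the concrete file contents are inferred from the Split calls, not verified):

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Parse one assumed images_shape.txt entry, e.g. "img_001.bin:1 3 640 640",
// mirroring Split(line, ":") and Split<int64_t>(dims, " ") in the test.
int main() {
  const std::string line = "img_001.bin:1 3 640 640";  // hypothetical entry
  const size_t colon = line.find(':');
  const std::string name = line.substr(0, colon);
  std::istringstream dim_stream(line.substr(colon + 1));
  std::vector<int64_t> shape;
  for (int64_t d = 0; dim_stream >> d;) shape.push_back(d);
  std::cout << name << ": " << shape.size() << " dims" << std::endl;
  return 0;
}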