Skip to content

Commit

Permalink
update onnx opset to 19
Browse files Browse the repository at this point in the history
  • Loading branch information
Zheng-Bicheng committed May 24, 2024
1 parent e703c59 commit e22930e
Show file tree
Hide file tree
Showing 12 changed files with 24 additions and 20 deletions.
11 changes: 6 additions & 5 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,11 @@ configure_file(${PROJECT_SOURCE_DIR}/paddle2onnx/mappers_registry.h.in ${PROJECT
if(PADDLE2ONNX_DEBUG)
add_definitions(-DPADDLE2ONNX_DEBUG)
endif()
# Set C++11 as standard for the whole project
if(NOT MSVC)
set(CMAKE_CXX_STANDARD 11)
endif(NOT MSVC)

# Set C++17 as standard for the whole project
if(NOT DEFINED CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17)
endif()

# setting max opset version for onnx
# if you build from other version of onnx
Expand All @@ -37,7 +38,7 @@ add_definitions(-DPADDLE2ONNX_LIB)
# Third dependency: onnx
if(NOT TARGET onnx_proto)
if(NOT ONNX_NAMESPACE)
set(ONNX_NAMESPACE "paddle2onnx")
set(ONNX_NAMESPACE "onnx")
endif()
add_definitions("-DONNX_NAMESPACE=${ONNX_NAMESPACE}")

Expand Down
2 changes: 1 addition & 1 deletion paddle2onnx/mapper/activation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ void HardSigmoidMapper::Opset7() {
void SwishMapper::Opset7() {
auto input_info = GetInput("X");
auto output_info = GetOutput("Out");
std::shared_ptr<paddle2onnx::NodeProto> sigmod_node = nullptr;
std::shared_ptr<ONNX_NAMESPACE::NodeProto> sigmod_node = nullptr;

if (HasAttr("beta")) {
float temp_beta = 1.0;
Expand Down
4 changes: 2 additions & 2 deletions paddle2onnx/mapper/exporter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ void ModelExporter::ProcessGraphDumplicateNames(
}
}

void ModelExporter::SaveExternalData(::paddle2onnx::GraphProto* graph,
void ModelExporter::SaveExternalData(::ONNX_NAMESPACE::GraphProto* graph,
const std::string& external_file_path,
bool* save_external) {
P2OLogger() << "The exported ONNX model is bigger than 2G, external data "
Expand Down Expand Up @@ -286,7 +286,7 @@ void ModelExporter::SaveExternalData(::paddle2onnx::GraphProto* graph,
continue;
}

tensor->set_data_location(TensorProto::EXTERNAL);
tensor->set_data_location(ONNX_NAMESPACE::TensorProto::EXTERNAL);
auto external_data = tensor->add_external_data();
external_data->set_key("location");
external_data->set_value(file_name);
Expand Down
2 changes: 1 addition & 1 deletion paddle2onnx/mapper/exporter.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ struct ModelExporter {
std::set<std::string>* unsupported_ops,
bool enable_experimental_op);

void SaveExternalData(::paddle2onnx::GraphProto* graph,
void SaveExternalData(ONNX_NAMESPACE::GraphProto* graph,
const std::string& external_file_path,
bool* save_external = nullptr);

Expand Down
2 changes: 1 addition & 1 deletion paddle2onnx/mapper/mapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ class Mapper {
// a return value in [7, MAX_ONNX_OPSET_VERSION] represents the minimum
// supported opset_version;
// a return value < 0 means the op is not supported.
virtual int32_t GetMinOpset(bool verbose = false) { return 7; }
virtual int32_t GetMinOpset(bool verbose) { return 7; }

virtual bool IsExportAsCustomOp() { return export_as_custom_op; }

Expand Down
6 changes: 3 additions & 3 deletions paddle2onnx/mapper/quantize_helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -615,9 +615,9 @@ void QuantizeModelProcessor::AddQDQInModel(
} else {
// Handle the following situations
// conv conv
// / | \ -> / \
// conv conv scale DQD scale
// / \
// / | \ -> / \
// conv conv scale DQD scale
// / \
// conv conv
std::vector<std::shared_ptr<ONNX_NAMESPACE::NodeProto>> except_nodes;
auto next_nodes = name2node_dict_[name];
Expand Down
5 changes: 4 additions & 1 deletion paddle2onnx/mapper/tensor/atan2.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,10 @@
// limitations under the License.

#include "paddle2onnx/mapper/tensor/atan2.h"
#define M_PI 3.14159265358979323846 /* pi */

#ifndef M_PI
#define M_PI 3.14159265358979323846264338327950288
#endif

namespace paddle2onnx {
REGISTER_MAPPER(atan2, Atan2Mapper)
Expand Down
4 changes: 2 additions & 2 deletions paddle2onnx/mapper/tensor/reduce_logsumexp.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,9 @@ class ReduceLogSumExpMapper : public Mapper {
}

void Opset18() override;
void Opset11();
void Opset11() override;

int32_t GetMinOpset(bool verbose = false);
int32_t GetMinOpset(bool verbose = false) override;

private:
bool keep_dim_;
Expand Down
2 changes: 1 addition & 1 deletion paddle2onnx/optimizer/convert_fp32_to_fp16.cc
Original file line number Diff line number Diff line change
Expand Up @@ -823,7 +823,7 @@ void ConvertFp32ToFp16::Convert(ONNX_NAMESPACE::ModelProto* model) {
op_block_list_.insert(op_block_list_.end(), custom_ops_.begin(),
custom_ops_.end());
}
shape_inference::InferShapes(*model);
ONNX_NAMESPACE::shape_inference::InferShapes(*model);
// 1 if it is a FP16 model, skip this
if (IsFP16Model(*model)) {
P2OLogger() << "[Info] The input ONNX Model is a FP16 model." << std::endl;
Expand Down
2 changes: 1 addition & 1 deletion paddle2onnx/optimizer/paddle2onnx_optimizer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@

namespace ONNX_NAMESPACE {
namespace optimization {

using namespace paddle2onnx;
ONNX_NAMESPACE::ModelProto OptimizeOnnxModel(
const ONNX_NAMESPACE::ModelProto& model_proto) {
OptimizerOption option;
Expand Down
2 changes: 1 addition & 1 deletion third_party/onnx
Submodule onnx updated 1360 files
2 changes: 1 addition & 1 deletion third_party/optimizer
Submodule optimizer updated 68 files
+61 −21 .github/workflows/build-and-test.yml
+4 −1 .gitmodules
+48 −17 CMakeLists.txt
+7 −0 MANIFEST.in
+24 −1 README.md
+1 −1 VERSION_NUMBER
+22 −0 cmake/utils.cmake
+34 −16 examples/onnx_optimizer_exec.cpp
+34 −19 onnxoptimizer/__init__.py
+15 −0 onnxoptimizer/__main__.py
+145 −0 onnxoptimizer/c_api/onnxoptimizer_c_api.cc
+30 −0 onnxoptimizer/c_api/onnxoptimizer_c_api.h
+33 −6 onnxoptimizer/cpp2py_export.cc
+309 −0 onnxoptimizer/model_util.cc
+23 −0 onnxoptimizer/model_util.h
+84 −0 onnxoptimizer/onnxoptimizer_main.py
+7 −15 onnxoptimizer/optimize.h
+0 −4 onnxoptimizer/pass.cc
+7 −4 onnxoptimizer/pass.h
+5 −13 onnxoptimizer/pass_registry.cc
+55 −11 onnxoptimizer/pass_registry.h
+63 −0 onnxoptimizer/passes/adjust_add.h
+91 −0 onnxoptimizer/passes/adjust_slice_and_matmul.h
+32 −0 onnxoptimizer/passes/bitscast.h
+329 −0 onnxoptimizer/passes/cse_util.h
+134 −0 onnxoptimizer/passes/data_type.h
+70 −0 onnxoptimizer/passes/eliminate_common_subexpression.h
+55 −0 onnxoptimizer/passes/eliminate_consecutive_idempotent_ops.h
+52 −72 onnxoptimizer/passes/eliminate_duplicate_initializer.h
+25 −16 onnxoptimizer/passes/eliminate_if_with_const_cond.h
+41 −0 onnxoptimizer/passes/eliminate_nop_concat.h
+47 −0 onnxoptimizer/passes/eliminate_nop_expand.h
+3 −2 onnxoptimizer/passes/eliminate_nop_flatten.h
+12 −30 onnxoptimizer/passes/eliminate_nop_pad.h
+83 −0 onnxoptimizer/passes/eliminate_nop_reshape.h
+53 −0 onnxoptimizer/passes/eliminate_nop_split.h
+122 −0 onnxoptimizer/passes/eliminate_nop_with_unit.h
+73 −0 onnxoptimizer/passes/eliminate_shape_gather.h
+64 −0 onnxoptimizer/passes/eliminate_shape_op.h
+111 −0 onnxoptimizer/passes/eliminate_slice_after_shape.h
+3 −4 onnxoptimizer/passes/extract_constant_to_initializer.h
+4 −3 onnxoptimizer/passes/fuse_add_bias_into_conv.h
+127 −99 onnxoptimizer/passes/fuse_bn_into_conv.h
+128 −0 onnxoptimizer/passes/fuse_concat_into_reshape.h
+4 −3 onnxoptimizer/passes/fuse_consecutive_log_softmax.h
+20 −53 onnxoptimizer/passes/fuse_consecutive_reduce_unsqueeze.h
+89 −0 onnxoptimizer/passes/fuse_consecutive_slices.h
+7 −31 onnxoptimizer/passes/fuse_consecutive_squeezes.h
+2 −1 onnxoptimizer/passes/fuse_consecutive_transposes.h
+86 −0 onnxoptimizer/passes/fuse_consecutive_unsqueezes.h
+5 −7 onnxoptimizer/passes/fuse_matmul_add_bias_into_gemm.h
+44 −79 onnxoptimizer/passes/fuse_pad_into_conv.h
+152 −0 onnxoptimizer/passes/fuse_pad_into_pool.h
+98 −0 onnxoptimizer/passes/fuse_qkv.h
+90 −0 onnxoptimizer/passes/logging.h
+25 −0 onnxoptimizer/passes/pass_util.cc
+480 −0 onnxoptimizer/passes/pass_util.h
+98 −0 onnxoptimizer/passes/rename_input_output.h
+157 −0 onnxoptimizer/passes/replace_einsum_with_matmul.h
+66 −0 onnxoptimizer/passes/rewrite_input_dtype.h
+37 −0 onnxoptimizer/passes/set_unique_name_for_nodes.h
+94 −0 onnxoptimizer/passes/string_utils.h
+182 −0 onnxoptimizer/passes/tensor_util.cc
+25 −0 onnxoptimizer/passes/tensor_util.h
+3,442 −1,237 onnxoptimizer/test/optimizer_test.py
+29 −12 setup.py
+1 −1 third_party/onnx
+1 −0 third_party/protobuf

0 comments on commit e22930e

Please sign in to comment.