diff --git a/tools/pnnx/Releasenotes b/tools/pnnx/Releasenotes
index 2718c0d29271..8665e4a2f67f 100644
--- a/tools/pnnx/Releasenotes
+++ b/tools/pnnx/Releasenotes
@@ -90,4 +90,11 @@ dev.1.0.24.20240712
 1. Fix the bug of extract_sub_graph
 
 dev.1.0.25.20240715
-1. Support static qunantize for torch.fx mode
\ No newline at end of file
+1. Support static quantize for torch.fx mode
+
+dev.1.0.26.20240723
+1. Add torch.logical_not and torch.nonzero
+2. Add fold constants sub graph pass in pass level5
+3. Add trans unbind2squeeze pass in pass level6
+4. Fix Tensor.fill
+5. Fix torch.unbind
\ No newline at end of file
diff --git a/tools/pnnx/src/CMakeLists.txt b/tools/pnnx/src/CMakeLists.txt
index db1addbd7c7c..d68787ed4de5 100644
--- a/tools/pnnx/src/CMakeLists.txt
+++ b/tools/pnnx/src/CMakeLists.txt
@@ -237,6 +237,7 @@ set(pnnx_pass_level2_SRCS
     pass_level2/torch_index_select.cpp
     pass_level2/torch_le.cpp
     pass_level2/torch_lgamma.cpp
+    pass_level2/torch_logical_not.cpp
     pass_level2/torch_logsumexp.cpp
     pass_level2/torch_lt.cpp
     pass_level2/torch_masked_select.cpp
@@ -248,6 +249,7 @@ set(pnnx_pass_level2_SRCS
     pass_level2/torch_mv.cpp
     pass_level2/torch_narrow.cpp
     pass_level2/torch_ne.cpp
+    pass_level2/torch_nonzero.cpp
     pass_level2/torch_norm.cpp
     pass_level2/torch_normal.cpp
     pass_level2/torch_ones.cpp
@@ -376,6 +378,7 @@ set(pnnx_pass_level5_SRCS
     pass_level5/fuse_static_linear.cpp
     pass_level5/normalize_einsum_equation.cpp
     pass_level5/unroll_rnn_op.cpp
+    pass_level5/fold_constants_sub_graph.cpp
 )
 
 # add by senli 20240321
@@ -385,6 +388,7 @@ set(pnnx_pass_level6_SRCS
     pass_level6/trans_Stack2Unsqueeze.cpp
     pass_level6/trans_ReshapeAs2Reshape.cpp
     pass_level6/trans_TensorTypeAs2TensorTo.cpp
+    pass_level6/trans_Unbind2Squeeze.cpp
 )
 
 set(pnnx_pass_sub_model_SRCS
diff --git a/tools/pnnx/src/ir.cpp b/tools/pnnx/src/ir.cpp
index 909996b4c603..eb5c19f95178 100644
--- a/tools/pnnx/src/ir.cpp
+++ b/tools/pnnx/src/ir.cpp
@@ -30,6 +30,29 @@
 #include
 
 namespace pnnx {
+static std::vector<std::string> options = {"main", "replace", "delete"};
+static void get_op_name_label(std::string& src_str, std::string& name, std::string& label)
+{
+
+    size_t pos = src_str.find_last_of('_');
+
+    if (pos != std::string::npos) {
+        name = src_str.substr(0, pos);
+        label = src_str.substr(pos + 1);
+        auto it = std::find(options.begin(), options.end(), label);
+        if (it == options.end())
+        {
+            name = src_str;
+            label = "";
+        }
+
+    } else {
+        name = src_str;
+        label = "";
+    }
+}
+
+
 static bool type_is_integer(int type)
 {
     if (type == 1) return false;
@@ -735,14 +758,16 @@ int Graph::load(const std::string& parampath, const std::string& binpath)
         std::istringstream iss(line);
 
         std::string type;
-        std::string name;
+        std::string new_op_name;
         int input_count = 0;
         int output_count = 0;
 
-        iss >> type >> name >> input_count >> output_count;
-
+        iss >> type >> new_op_name >> input_count >> output_count;
+        std::string name;
+        std::string label;
+        get_op_name_label(new_op_name, name, label);
         Operator* op = new_operator(type, name);
-
+        op->label = label;
         for (int j = 0; j < input_count; j++)
         {
             std::string operand_name;
@@ -825,8 +850,9 @@ int Graph::save(const std::string& parampath, const std::string& binpath)
 
     for (const Operator* op : ops)
     {
-        fprintf(paramfp, "%-24s %-24s %d %d", op->type.c_str(), op->name.c_str(), (int)op->inputs.size(), (int)op->outputs.size());
-
+        std::string new_op_name = op->name + "_" + op->label;
+        fprintf(paramfp, "%-24s %-24s %d %d", op->type.c_str(), new_op_name.c_str(), (int)op->inputs.size(), (int)op->outputs.size());
+
         for (const Operand* oprand : op->inputs)
         {
             fprintf(paramfp, " %s", oprand->name.c_str());
@@ -3919,6 +3945,10 @@ int Graph::python_infer(const std::string& pypath, const std::string& binpath,
                 {
                     fprintf(pyfp, "torch.tensor(False)");
                 }
+                else if(op->type == "Tensor.fill")
+                {
+                    fprintf(pyfp, "True");
+                }
                 else
                 {
                     fprintf(pyfp, "None");
@@ -4048,9 +4078,15 @@ int Graph::python_infer(const std::string& pypath, const std::string& binpath,
                 }
                 fprintf(pyfp, ")");
             }
+
             }
 
-            fprintf(pyfp, ")\n");
+            fprintf(pyfp, ")");
+            if(op->outputs.size() == 1 && op->type == "torch.unbind")
+            {
+                fprintf(pyfp, "[0]");
+            }
+            fprintf(pyfp, "\n");
         }
         else
         {
diff --git a/tools/pnnx/src/ir.h b/tools/pnnx/src/ir.h
index 2d02c03e7715..b2b7e0547b43 100644
--- a/tools/pnnx/src/ir.h
+++ b/tools/pnnx/src/ir.h
@@ -300,7 +300,7 @@ class Operator
     // keep std::string typed member the last for cross cxxabi compatibility
     std::string type;
     std::string name;
-
+    std::string label = ""; // main delete replace
     std::vector<std::string> inputnames;
     std::map<std::string, Parameter> params;
     std::map<std::string, Attribute> attrs;
diff --git a/tools/pnnx/src/parse/pnnx_ir_parse.cpp b/tools/pnnx/src/parse/pnnx_ir_parse.cpp
index 86228b1a5d02..b5b58f5d9d59 100644
--- a/tools/pnnx/src/parse/pnnx_ir_parse.cpp
+++ b/tools/pnnx/src/parse/pnnx_ir_parse.cpp
@@ -13,7 +13,6 @@
 // specific language governing permissions and limitations under the License.
 
 #include "pnnx_ir_parse.h"
-
 #include
 #include
 #include
@@ -31,6 +30,27 @@ using namespace pnnx;
 
 namespace pnnx_ir {
+static std::vector<std::string> options = {"main", "replace", "delete"};
+static void get_op_name_label(std::string& src_str, std::string& name, std::string& label)
+{
+
+    size_t pos = src_str.find_last_of('_');
+
+    if (pos != std::string::npos) {
+        name = src_str.substr(0, pos);
+        label = src_str.substr(pos + 1);
+        auto it = std::find(options.begin(), options.end(), label);
+        if (it == options.end())
+        {
+            name = src_str;
+            label = "";
+        }
+
+    } else {
+        name = src_str;
+        label = "";
+    }
+}
 static size_t countSubstring(const std::string& str, const std::string& substr)
 {
     size_t count = 0;
     size_t pos = 0;
@@ -765,13 +785,16 @@ int Graph::load(const std::string& parampath, const std::string& binpath)
         std::istringstream iss(line);
 
         std::string type;
-        std::string name;
+        std::string new_op_name;
         int input_count = 0;
         int output_count = 0;
 
-        iss >> type >> name >> input_count >> output_count;
-
+        iss >> type >> new_op_name >> input_count >> output_count;
+        std::string name;
+        std::string label;
+        get_op_name_label(new_op_name, name, label);
         Operator* op = new_operator(type, name);
+        op->label = label;
 
         for (int j = 0; j < input_count; j++)
         {
@@ -855,8 +878,8 @@ int Graph::save(const std::string& parampath, const std::string& binpath)
 
     for (const Operator* op : ops)
    {
-        fprintf(paramfp, "%-24s %-24s %d %d", op->type.c_str(), op->name.c_str(), (int)op->inputs.size(), (int)op->outputs.size());
-
+        std::string new_op_name = op->name + "_" + op->label;
+        fprintf(paramfp, "%-24s %-24s %d %d", op->type.c_str(), new_op_name.c_str(), (int)op->inputs.size(), (int)op->outputs.size());
         for (const Operand* oprand : op->inputs)
         {
             fprintf(paramfp, " %s", oprand->name.c_str());
@@ -1041,7 +1064,8 @@ int Graph::save_param(const std::string& parampath, const std::vector<std::string>&
 
     for (const Operator op : input_operators)
     {
-        fprintf(paramfp, "%-24s %-24s %d %d", op.type.c_str(), op.name.c_str(), (int)op.inputs.size(), (int)op.outputs.size());
+        std::string new_op_name = op.name + "_" + op.label;
+        fprintf(paramfp, "%-24s %-24s %d %d", op.type.c_str(), new_op_name.c_str(), (int)op.inputs.size(), (int)op.outputs.size());
         for (const Operand* oprand : op.inputs)
         {
diff --git a/tools/pnnx/src/parse/pnnx_ir_parse.h b/tools/pnnx/src/parse/pnnx_ir_parse.h
index 2ac4b09a93bf..370217eb3a9b 100644
--- a/tools/pnnx/src/parse/pnnx_ir_parse.h
+++ b/tools/pnnx/src/parse/pnnx_ir_parse.h
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 namespace py = pybind11;
 #if BUILD_PNNX
 namespace torch {
@@ -197,7 +198,7 @@ class Operator
     // keep std::string typed member the last for cross cxxabi compatibility
     std::string type;
     std::string name;
-
+    std::string label = ""; // main delete replace
     std::vector<std::string> inputnames;
     std::map<std::string, Parameter> params;
     std::map<std::string, Attribute> attrs;
diff --git a/tools/pnnx/src/pass_level2/torch_logical_not.cpp b/tools/pnnx/src/pass_level2/torch_logical_not.cpp
new file mode 100644
index 000000000000..81654c9a65e3
--- /dev/null
+++ b/tools/pnnx/src/pass_level2/torch_logical_not.cpp
@@ -0,0 +1,40 @@
+// Tencent is pleased to support the open source community by making ncnn available.
+//
+// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
+//
+// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// https://opensource.org/licenses/BSD-3-Clause
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "pass_level2.h"
+
+namespace pnnx {
+
+class torch_logical_not : public GraphRewriterPass
+{
+public:
+    const char* match_pattern_graph() const
+    {
+        return R"PNNXIR(7767517
+3 2
+pnnx.Input input_0 0 1 input
+aten::logical_not op_0 1 1 input out
+pnnx.Output output 1 0 out
+)PNNXIR";
+    }
+
+    const char* type_str() const
+    {
+        return "torch.logical_not";
+    }
+};
+
+REGISTER_GLOBAL_PNNX_GRAPH_REWRITER_PASS(torch_logical_not, 20)
+
+} // namespace pnnx
diff --git a/tools/pnnx/src/pass_level2/torch_nonzero.cpp b/tools/pnnx/src/pass_level2/torch_nonzero.cpp
new file mode 100644
index 000000000000..a730b8295af5
--- /dev/null
+++ b/tools/pnnx/src/pass_level2/torch_nonzero.cpp
@@ -0,0 +1,40 @@
+// Tencent is pleased to support the open source community by making ncnn available.
+//
+// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
+//
+// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// https://opensource.org/licenses/BSD-3-Clause
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "pass_level2.h"
+
+namespace pnnx {
+
+class torch_nonzero : public GraphRewriterPass
+{
+public:
+    const char* match_pattern_graph() const
+    {
+        return R"PNNXIR(7767517
+3 2
+pnnx.Input input_0 0 1 input
+aten::nonzero op_0 1 1 input out
+pnnx.Output output 1 0 out
+)PNNXIR";
+    }
+
+    const char* type_str() const
+    {
+        return "torch.nonzero";
+    }
+};
+
+REGISTER_GLOBAL_PNNX_GRAPH_REWRITER_PASS(torch_nonzero, 20)
+
+} // namespace pnnx
diff --git a/tools/pnnx/src/pass_level5.cpp b/tools/pnnx/src/pass_level5.cpp
index 32ffc6bc9c30..0af5f6a03506 100644
--- a/tools/pnnx/src/pass_level5.cpp
+++ b/tools/pnnx/src/pass_level5.cpp
@@ -62,7 +62,7 @@
 #include "pass_level4/canonicalize.h"
 #include "pass_level3/fuse_index_expression.h"
 #include "pass_level5/fuse_pixel_unshuffle.h"
-
+#include "pass_level5/fold_constants_sub_graph.h"
 namespace pnnx {
 
 void pass_level5(std::shared_ptr<Graph> g, const std::set<std::string>& foldable_constants, const std::string& foldable_constants_zippath)
@@ -145,6 +145,7 @@ void pass_level5(std::shared_ptr<Graph> g, const std::set<std::string>& fo
 
     dead_code_elimination(g);
 
+    fold_constants_sub_graph(g);
     canonicalize(g);
 }
 
diff --git a/tools/pnnx/src/pass_level5/fold_constants_sub_graph.cpp b/tools/pnnx/src/pass_level5/fold_constants_sub_graph.cpp
new file mode 100644
index 000000000000..cfeed1bbf9be
--- /dev/null
+++ b/tools/pnnx/src/pass_level5/fold_constants_sub_graph.cpp
@@ -0,0 +1,231 @@
+// Tencent is pleased to support the open source community by making ncnn available.
+//
+// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
+//
+// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// https://opensource.org/licenses/BSD-3-Clause
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "fold_constants_sub_graph.h"
+#include
+
+#include "storezip.h"
+#include "pass_level4/dead_code_elimination.h"
+
+namespace pnnx {
+
+void fold_constants_sub_graph(std::shared_ptr<Graph> graph)
+{
+
+    // StoreZipReader zip;
+    // zip.open(all_tensor_zippath);
+
+    // find input node [like pnnx.Input torch.arange]
+    // Attribute node like[pnnx.Attribute]
+    std::queue<Operator*> input_node_list;
+    std::queue<Operator*> attribute_node_list;
+    for(size_t i = 0; i < graph->ops.size(); i++)
+    {
+        Operator* cur_op = graph->ops[i];
+        if (cur_op->type == "pnnx.Attribute")
+        {
+            attribute_node_list.push(cur_op);
+        }
+        else if(cur_op->type == "pnnx.Input" || cur_op->inputs.size() == 0)
+        {
+            input_node_list.push(cur_op);
+        }
+    }
+
+    // Assign the nodes associated with the input nodes as main labels
+    std::queue<Operator*> main_node_list;
+    while(!input_node_list.empty())
+    {
+        Operator* input_node = input_node_list.front();
+        input_node_list.pop();
+        input_node->label = "main";
+        std::vector<Operand*> output_operands = input_node->outputs;
+        for(auto out: output_operands)
+        {
+            auto consumers = out->consumers;
+            for(auto consumer: consumers)
+            {
+                if(consumer->label == "")
+                {
+                    main_node_list.push(consumer);
+                }
+            }
+        }
+        while(!main_node_list.empty())
+        {
+            Operator* cur_node = main_node_list.front();
+            main_node_list.pop();
+            cur_node->label = "main";
+            std::vector<Operand*> output_operands = cur_node->outputs;
+            for(auto out: output_operands)
+            {
+                auto consumers = out->consumers;
+                for(auto consumer: consumers)
+                {
+                    if(consumer->label == "")
+                    {
+                        main_node_list.push(consumer);
+                    }
+                }
+            }
+        }
+    }
+    // Assign the nodes associated with the attribute nodes as main, delete or replace labels
+    std::queue<Operator*> delete_replace_node_list;
+    while(!attribute_node_list.empty())
+    {
+        Operator* input_attribute_node = attribute_node_list.front();
+        attribute_node_list.pop();
+        std::vector<Operand*> output_operands = input_attribute_node->outputs;
+        for(auto out: output_operands)
+        {
+            auto consumers = out->consumers;
+            for(auto consumer: consumers)
+            {
+                if(consumer->label == "")
+                {
+                    delete_replace_node_list.push(consumer);
+                }
+                else if(consumer->label == "main")
+                {
+                    input_attribute_node->label = "main";
+                }
+
+            }
+        }
+
+        if(input_attribute_node->label == "")
+        {
+            input_attribute_node->label = "delete";
+        }
+
+        while(!delete_replace_node_list.empty())
+        {
+            Operator* cur_node = delete_replace_node_list.front();
+            delete_replace_node_list.pop();
+
+            std::vector<Operand*> output_operands = cur_node->outputs;
+            bool consumer_node_is_main = false;
+            for(auto out: output_operands)
+            {
+                auto consumers = out->consumers;
+                for(auto consumer: consumers)
+                {
+                    if(consumer->label == "")
+                    {
+                        delete_replace_node_list.push(consumer);
+                    }
+                    else if(consumer->label == "main")
+                    {
+                        consumer_node_is_main = true;
+                    }
+                }
+            }
+            if(consumer_node_is_main)
+            {
+                cur_node->label = "replace";
+            }
+            else
+            {
+                cur_node->label = "delete";
+            }
+        }
+
+    }
+
+    // process replace node and delete node
+    // while(true)
+    // {
+    //     bool matched = false;
+    //     for(size_t i = 0; i < graph->ops.size(); i++)
+    //     {
+    //         Operator* cur_op = graph->ops[i];
+    //         if (cur_op->label == "")
+    //         {
+    //             fprintf(stderr, "############# find a not sign node, node name is: %s\n", cur_op->name.c_str());
+    //         }
+    //         else if(cur_op->label == "replace")
+    //         {
+    //             matched = true;
+    //             std::vector<Operand*> outputs = cur_op->outputs;
+    //             for(auto out: outputs)
+    //             {
+    //                 std::string name = out->name;
+    //                 // replace cur op to pnnx.Attribute node
+    //                 Operator* op_new = graph->new_operator_after("pnnx.Attribute", std::string("pnnx_fold_") + name, cur_op);
+    //                 op_new->label == "main";
+    //                 op_new->attrs["data"] = Attribute();
+    //                 Attribute& t2 = op_new->attrs["data"];
+    //                 t2.type = out->type;
+    //                 t2.shape = out->shape;
+    //                 size_t size = zip.get_file_size(name);
+    //                 t2.data.resize(size);
+    //                 zip.read_file(name, t2.data.data());
+
+    //                 op_new->outputs.push_back(out);
+    //                 out->producer = op_new;
+    //             }
+    //             std::vector<Operand*> cur_op_inputs = cur_op->inputs;
+    //             for(auto input: cur_op_inputs)
+    //             {
+    //                 input->consumers.erase(std::find(input->consumers.begin(), input->consumers.end(), cur_op));
+    //             }
+    //             cur_op->inputs.clear();
+    //             cur_op->outputs.clear();
+    //             graph->ops.erase(graph->ops.begin() + i);
+    //             delete cur_op;
+    //             break;
+    //         }
+    //         else if(cur_op->label == "delete")
+    //         {
+    //             matched = true;
+    //             std::vector<Operand*> cur_op_inputs = cur_op->inputs;
+    //             for(auto input: cur_op_inputs)
+    //             {
+    //                 input->consumers.erase(std::find(input->consumers.begin(), input->consumers.end(), cur_op));
+    //             }
+    //             std::vector<Operand*> outputs = cur_op->outputs;
+    //             for(auto out: outputs)
+    //             {
+    //                 for(auto consumer: out->consumers)
+    //                 {
+    //                     consumer->inputs.erase(std::find(consumer->inputs.begin(), consumer->inputs.end(), out));
+    //                 }
+    //             }
+    //             for(auto out: outputs)
+    //             {
+    //                 out->producer = 0;
+    //                 out->consumers.clear();
+    //                 graph->operands.erase(std::find(graph->operands.begin(), graph->operands.end(), out));
+    //                 delete out;
+    //             }
+    //             cur_op->inputs.clear();
+    //             cur_op->outputs.clear();
+
+    //             graph->ops.erase(graph->ops.begin() + i);
+    //             delete cur_op;
+    //             break;
+    //         }
+    //     }
+    //     if (!matched)
+    //         break;
+
+    // }
+
+    // zip.close();
+    // dce
+    dead_code_elimination(graph);
+}
+
+} // namespace pnnx
diff --git a/tools/pnnx/src/pass_level5/fold_constants_sub_graph.h b/tools/pnnx/src/pass_level5/fold_constants_sub_graph.h
new file mode 100644
index 000000000000..48966731dc1e
--- /dev/null
+++ b/tools/pnnx/src/pass_level5/fold_constants_sub_graph.h
@@ -0,0 +1,21 @@
+// Tencent is pleased to support the open source community by making ncnn available.
+//
+// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
+//
+// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// https://opensource.org/licenses/BSD-3-Clause
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "ir.h"
+
+namespace pnnx {
+
+void fold_constants_sub_graph(std::shared_ptr<Graph> graph);
+
+} // namespace pnnx
diff --git a/tools/pnnx/src/pass_level6.cpp b/tools/pnnx/src/pass_level6.cpp
index d5ab5a0e8ccb..61bf27d3f55f 100644
--- a/tools/pnnx/src/pass_level6.cpp
+++ b/tools/pnnx/src/pass_level6.cpp
@@ -19,7 +19,7 @@
 #include "pass_level6/trans_Stack2Unsqueeze.h"
 #include "pass_level6/trans_ReshapeAs2Reshape.h"
 #include "pass_level6/trans_TensorTypeAs2TensorTo.h"
-
+#include "pass_level6/trans_Unbind2Squeeze.h"
 #include "config.h"
 
 namespace pnnx {
@@ -39,6 +39,8 @@ void pass_level6(std::shared_ptr<Graph> g, const std::set<std::string>& fo
 
     trans_TensorTypeAs2TensorTo(g);
     fprintf(stderr, "############# finish trans_TensorTypeAs2TensorTo\n");
+    trans_Unbind2Squeeze(g);
+    fprintf(stderr, "############# finish trans_Unbind2Squeeze\n");
 }
 
 } // namespace pnnx
diff --git a/tools/pnnx/src/pass_level6/trans_Unbind2Squeeze.cpp b/tools/pnnx/src/pass_level6/trans_Unbind2Squeeze.cpp
new file mode 100644
index 000000000000..8fe85d131e0d
--- /dev/null
+++ b/tools/pnnx/src/pass_level6/trans_Unbind2Squeeze.cpp
@@ -0,0 +1,63 @@
+// Tencent is pleased to support the open source community by making ncnn available.
+//
+// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
+//
+// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// https://opensource.org/licenses/BSD-3-Clause
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "trans_Unbind2Squeeze.h"
+
+#include
+#include "pass_level2.h"
+
+namespace pnnx {
+
+void trans_Unbind2Squeeze(std::shared_ptr<Graph> graph)
+{
+    while (1)
+    {
+        bool matched = false;
+
+        for (size_t i = 0; i < graph->ops.size(); i++)
+        {
+            Operator* op = graph->ops[i];
+
+            if (op->type != "torch.unbind")
+                continue;
+
+            // get input num
+            int dim = op->params["dim"].i;
+            std::vector<int> input_shape = op->inputs[0]->shape;
+            if( input_shape[dim] == 1)
+            {
+                matched = true;
+                op->type = "torch.squeeze";
+                std::string str = op->name;
+                std::string from = "torch.unbind";
+                std::string to = "torch.squeeze";
+
+                // to find sub str
+                size_t start_pos = str.find(from);
+                if(start_pos != std::string::npos) {
+                    // replace sub str
+                    str.replace(start_pos, from.length(), to);
+                }
+                op->name = str;
+                break;
+            }
+
+        }
+
+        if (!matched)
+            break;
+    }
+}
+
+} // namespace pnnx
diff --git a/tools/pnnx/src/pass_level6/trans_Unbind2Squeeze.h b/tools/pnnx/src/pass_level6/trans_Unbind2Squeeze.h
new file mode 100644
index 000000000000..dfc05cc121bc
--- /dev/null
+++ b/tools/pnnx/src/pass_level6/trans_Unbind2Squeeze.h
@@ -0,0 +1,22 @@
+
+// Tencent is pleased to support the open source community by making ncnn available.
+//
+// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
+//
+// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// https://opensource.org/licenses/BSD-3-Clause
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#include "ir.h"
+
+namespace pnnx {
+
+void trans_Unbind2Squeeze(std::shared_ptr<Graph> graph);
+
+} // namespace pnnx
diff --git a/tools/pnnx/src/py_proj.cpp b/tools/pnnx/src/py_proj.cpp
index 6d38c1f4e685..d1aec7a413ab 100644
--- a/tools/pnnx/src/py_proj.cpp
+++ b/tools/pnnx/src/py_proj.cpp
@@ -5,7 +5,7 @@
 // #include
 #define STRINGIFY(x) #x
 #define MACRO_STRINGIFY(x) STRINGIFY(x)
-#define MYLIBRARY_VERSION "dev.1.0.25.20240715"
+#define MYLIBRARY_VERSION "dev.1.0.26.20240716"
 using namespace pnnx_graph;
 using namespace pnnx_ir;
 namespace py = pybind11;
@@ -65,6 +65,7 @@ PYBIND11_MODULE(ptx, m)
     .def_readwrite("outputs", &Operator::outputs)
     .def_readwrite("type", &Operator::type)
     .def_readwrite("name", &Operator::name)
+    .def_readwrite("label", &Operator::label)
     .def_readwrite("inputnames", &Operator::inputnames)
     .def_readwrite("params", &Operator::params)
     .def_readwrite("attrs", &Operator::attrs);
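
For reference, a minimal standalone sketch of how the "_main" / "_replace" / "_delete" suffix written by Graph::save round-trips through the get_op_name_label helper added in ir.cpp above. The main() driver and the sample operator names are illustrative only and are not part of the patch.

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Mirrors the helper added in ir.cpp: strip a trailing "_main"/"_replace"/"_delete"
// suffix from a saved operator name; any other suffix is treated as part of the name.
static const std::vector<std::string> options = {"main", "replace", "delete"};

static void get_op_name_label(const std::string& src_str, std::string& name, std::string& label)
{
    size_t pos = src_str.find_last_of('_');
    if (pos != std::string::npos)
    {
        name = src_str.substr(0, pos);
        label = src_str.substr(pos + 1);
        if (std::find(options.begin(), options.end(), label) == options.end())
        {
            // the last '_' does not introduce a known label, keep the full name
            name = src_str;
            label = "";
        }
    }
    else
    {
        name = src_str;
        label = "";
    }
}

int main()
{
    std::string name, label;

    get_op_name_label("convbn2d_0_main", name, label); // name = "convbn2d_0", label = "main"
    printf("%s | %s\n", name.c_str(), label.c_str());

    get_op_name_label("torch.unbind_12", name, label); // "12" is not a label, so label stays empty
    printf("%s | %s\n", name.c_str(), label.c_str());

    return 0;
}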