Skip to content

Commit

Permalink
1. Fix the bug of trans_expression2TupleConstruct 2. Add skip_pass_le…
Browse files Browse the repository at this point in the history
…vel6 and only_save_main mod 3.Process multi prim::TupleConstruct node at output
  • Loading branch information
sen.li committed Jul 11, 2024
1 parent 676db70 commit 33c50c1
Show file tree
Hide file tree
Showing 9 changed files with 356 additions and 114 deletions.
25 changes: 17 additions & 8 deletions tools/pnnx/Releasenotes
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ dev.1.0.5.20240508
1. Synchronize the main ncnn repository
2. Fix missing approximate parameters of nn.GELU


dev.1.0.6.20240511
1. Add new pass trans_Stack2Unsqueeze, used when torch.stack has a single input and is effectively equivalent to torch.unsqueeze

Expand All @@ -47,32 +48,40 @@ dev.1.0.13.20240530
1. Convert string to char in the getInputType function

dev.1.0.14.20240531
1. Fix bug of make_index_expression for gen tensor.index infer op
1. Fix bug of make_index_expression for gen tensor.index infer op

dev.1.0.15.20240603
1. Support parse Tensor.reshape_as
2. Add trans_ReshapeAs2Reshape pass

dev.1.0.16.20240605
1. Fix bug of Tensor.index with two inputs


dev.1.0.17.20240606
1. Add trans_TensorTypeAs2TensorTo pass in pass level 7

dev.1.0.18.20240613
1. Skip conv2d nodes of type NoneType


dev.1.0.19.20240614
1. Add extracting sub graph function

dev.1.0.20.20240617
dev.1.0.20.20240620
1. Add loop op parse function
2. Support export sub_model
3. Support load input tensor to export
4. Support torchvision.ops.nms


dev.1.0.21.20240619
1. Support export sub_model
dev.1.0.21.20240627
1. Support parsing multiple blocks
2. Support if blocks

dev.1.0.22.20240620
1. Support load input tensor to export
dev.1.0.22.20240709
1. Process multiple prim::TupleConstruct nodes at output

dev.1.0.23.20240627
1. Support If/Loop block
dev.1.0.23.20240711
1. Fix the bug of trans_expression2TupleConstruct
2. Add skip_pass_level6 and only_save_main modes
154 changes: 112 additions & 42 deletions tools/pnnx/src/ir.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1432,10 +1432,24 @@ static std::string make_index_expression(const Operator* op)
indices_index++;
}
size_t pos = 0;
if ((pos = index_expr.find("@")) != std::string::npos) {
while((pos = index_expr.find("@")) != std::string::npos) {
index_expr.replace(pos, 1, "v_");
}
for(int i = 0; i < shape.size(); i++)
int input_size = op->inputs.size();
int loop_num = 0;
if(input_size == 1)
{
int indice_num = op->params.at("indice_num").i;
loop_num = shape.size() - indice_num + 1;
}
else
{
loop_num = shape.size() - (input_size - 1) + 1;
}

// fprintf(stderr, "############# indice_num: %s\n", std::to_string(indice_num).c_str());
// fprintf(stderr, "############# loop_num: %s\n", std::to_string(loop_num).c_str());
for(int i = 0; i < loop_num; i++)
{
if ( i == indices_index)
{
Expand All @@ -1446,7 +1460,7 @@ static std::string make_index_expression(const Operator* op)
out_index_expr = out_index_expr + ":";

}
if ( i != shape.size() - 1)
if ( i != loop_num - 1)
{
out_index_expr = out_index_expr + ",";
}
Expand Down Expand Up @@ -1822,20 +1836,30 @@ int Graph::python(const std::string& pypath, const std::string& pnnxbinpath)
}
else if (op->type == "Tensor.index")
{
// index expr
// if (op->inputs.size() == 2)
// {
// std::string expanded_expr = expand_expression(op->inputs[1]->producer);
// fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), expanded_expr.c_str());
// }
// else
// {
// std::string index_expr = make_index_expression(op);
// fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());
// }
std::string index_expr = make_index_expression(op);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());

if(!skip_pass_level6)
{

fprintf(stderr, "############# gen python with Tensor.index at %s\n", op->name.c_str());

std::string index_expr = make_index_expression(op);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());

}
else
{
fprintf(stderr, "############# gen python with Tensor.index at %s\n", op->name.c_str());
// index expr
if (op->inputs.size() == 2)
{
std::string expanded_expr = expand_expression(op->inputs[1]->producer);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), expanded_expr.c_str());
}
else
{
std::string index_expr = make_index_expression(op);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());
}
}
}
else if (op->type == "Tensor.expand")
{
Expand Down Expand Up @@ -3400,19 +3424,30 @@ int Graph::python_infer(const std::string& pypath, const std::string& binpath,
}
else if (op->type == "Tensor.index")
{
// index expr
// if (op->inputs.size() == 2)
// {
// std::string expanded_expr = expand_expression(op->inputs[1]->producer);
// fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), expanded_expr.c_str());
// }
// else
// {
// std::string index_expr = make_index_expression(op);
// fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());
// }
std::string index_expr = make_index_expression(op);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());
if(!skip_pass_level6)
{

fprintf(stderr, "############# gen python with Tensor.index at %s\n", op->name.c_str());

std::string index_expr = make_index_expression(op);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());

}
else
{
fprintf(stderr, "############# gen python with Tensor.index at %s\n", op->name.c_str());
// index expr
if (op->inputs.size() == 2)
{
std::string expanded_expr = expand_expression(op->inputs[1]->producer);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), expanded_expr.c_str());
}
else
{
std::string index_expr = make_index_expression(op);
fprintf(pyfp, "v_%s = v_%s[%s]\n", sanitize_identifier(op->outputs[0]->name).c_str(), sanitize_identifier(op->inputs[0]->name).c_str(), index_expr.c_str());
}
}
}
else if (op->type == "Tensor.expand")
{
Expand Down Expand Up @@ -4029,29 +4064,64 @@ int Graph::python_infer(const std::string& pypath, const std::string& binpath,

// return if pre node type is TupleConstruct, max_tensor_index not add one add by senli[pnnx_infer]
{
// bool TupleConstruct_flag = false;
// int max_tensor_index = 0;
// for (const Operator* op : ops)
// {
// if (op->type == "pnnx.Output")
// {
// std::vector<Operand*> inputs = op->inputs;
// for (const Operand* tensor : inputs)
// {
// Operator* pre_op = tensor->producer;
// if (pre_op->type == "prim::TupleConstruct")
// {
// TupleConstruct_flag = true;
// }
// }
// int num = std::stoi(op->inputs[0]->name);
// max_tensor_index = (max_tensor_index > num) ? max_tensor_index : num;
// }
// }

bool TupleConstruct_flag = false;
int max_tensor_index = 0;
for (const Operator* op : ops)
std::queue<Operator*> output_queue;
for (auto op : ops)
{
if (op->type == "pnnx.Output")
{
std::vector<Operand*> inputs = op->inputs;
for (const Operand* tensor : inputs)
output_queue.push(op);
break;
}
}
while(!output_queue.empty())
{
auto cur_output_op = output_queue.front();
output_queue.pop();
std::vector<Operand*> inputs = cur_output_op->inputs;
for (const Operand* tensor : inputs)
{
Operator* pre_op = tensor->producer;
if (pre_op->type == "prim::TupleConstruct")
{
Operator* pre_op = tensor->producer;
if (pre_op->type == "prim::TupleConstruct")
TupleConstruct_flag = true;
output_queue.push(pre_op);
}
else
{
for(auto out: pre_op->outputs)
{
TupleConstruct_flag = true;
int num = std::stoi(out->name);
max_tensor_index = (max_tensor_index > num) ? max_tensor_index : num;
}

}
int num = std::stoi(op->inputs[0]->name);
max_tensor_index = (max_tensor_index > num) ? max_tensor_index : num;
}
}
if (!TupleConstruct_flag)
{
max_tensor_index++;
}

max_tensor_index++;

fprintf(pyfp, " intermediate = {}\n");
fprintf(pyfp, " for i in range(%d):\n", max_tensor_index);
fprintf(pyfp, " key = 'v_' + str(i)\n");
Expand Down
2 changes: 1 addition & 1 deletion tools/pnnx/src/ir.h
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ class Graph

std::vector<Operator*> ops;
std::vector<Operand*> operands;

int skip_pass_level6 = 0;
private:
Graph(const Graph& rhs);
Graph& operator=(const Graph& rhs);
Expand Down
Loading

0 comments on commit 33c50c1

Please sign in to comment.