diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 378002a74416..c32449546f77 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -23,6 +23,7 @@
 import tvm
 from tvm.ir import IRModule
 
+from ... import nd as _nd
 from .. import analysis
 from .. import ty as _ty
 from .. import expr as _expr
@@ -954,10 +955,12 @@ def extract_parameters(self, program, scope=None):
             if not var.persistable:
                 continue
             if isinstance(scope, dict):
-                self.params[name] = scope[name]
+                self.params[name] = _nd.array(scope[name])
             else:
-                self.params[name] = np.array(scope.var(name).get_tensor())
-            self.nodes[name] = _expr.const(self.params[name])
+                self.params[name] = _nd.array(np.array(scope.var(name).get_tensor()))
+            shape = self.params[name].shape
+            dtype = self.params[name].dtype
+            self.nodes[name] = new_var(name, shape=shape, dtype=dtype)
 
     def check_input_shape(self, op, block):
         """Check the shape information of model's inputs, fixed shape is recommended."""
@@ -1048,6 +1051,12 @@ def from_translated_layer(self, layer, shape_dict):
         free_vars = analysis.free_vars(outputs)
         func = _function.Function(free_vars, outputs)
         mod = IRModule.from_expr(func)
+        # remove unused parameters
+        final_params = dict()
+        for var in free_vars:
+            if var.name_hint in self.params:
+                final_params[var.name_hint] = self.params[var.name_hint]
+        self.params = final_params
         return mod, self.params
 
 
@@ -1056,6 +1065,24 @@ def from_paddle(program_or_layer, shape_dict=None, scope=None):
 
     PaddlePaddle Program/TranslatedLayer represent the computation graph of PaddlePaddle model,
     and PaddlePaddle scope stores all the weights of PaddlePaddle model.
+
+    Parameters
+    ----------
+    program_or_layer : object of `paddle.static.Program` or `paddle.jit.TranslatedLayer`
+        Model loaded by `paddle.static.load_inference_model` or `paddle.jit.load`
+
+    shape_dict : dict of str to tuple/list, optional
+        The input shape of the model
+
+    scope : object of `paddle.static.Scope`, optional
+        The scope that stores all the weights of the model; defaults to `paddle.static.global_scope`
+
+    Returns
+    -------
+    mod : tvm.IRModule
+        The Relay module for compilation
+
+    params : dict of str to tvm.nd.NDArray
     """
 
     import paddle
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index 1d64f947e68a..e3d1fc9daf2b 100644
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -80,9 +80,8 @@ def verify_model(func, input_data, rtol=1e-5, atol=1e-5):
         baseline_outputs = (baseline_outputs.numpy(),)
 
     mod, params = relay.frontend.from_paddle(baseline_model, input_shape_dict)
-    parms_num = min(len(input_names), len(mod["main"].params))
    compiled_names = []
-    for arg in mod["main"].params[:parms_num]:
+    for arg in mod["main"].params:
         assert arg.name_hint in input_names or arg.name_hint in params
         if arg.name_hint in input_names:
             compiled_names.append(arg.name_hint)
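
For context, a minimal usage sketch of the `relay.frontend.from_paddle` API touched by this diff, assuming a model saved with `paddle.jit.save` (the model path and input name below are placeholders, not part of this change):

```python
import paddle
import tvm
from tvm import relay

# Load a model saved with paddle.jit.save; the result is a TranslatedLayer,
# which goes through the from_translated_layer path changed above.
layer = paddle.jit.load("path/to/saved_model")  # placeholder path

# Fixed input shapes are recommended by the frontend.
shape_dict = {"inputs": (1, 3, 224, 224)}  # placeholder input name and shape

mod, params = relay.frontend.from_paddle(layer, shape_dict=shape_dict)

# With this change, params holds tvm.nd.NDArray values and, on the
# TranslatedLayer path, only the weights referenced by the Relay function.
assert all(isinstance(p, tvm.nd.NDArray) for p in params.values())

with tvm.transform.PassContext(opt_level=3):
    lib = relay.build(mod, target="llvm", params=params)
```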