Skip to content

Commit

Permalink
Reduce squeeze2_matmul_fuse_pass and flatten tests time (#47098)
Browse files Browse the repository at this point in the history
* Add missing fp32 config and reduce the testing combination

* Reduce trt matmul pass test max examples
  • Loading branch information
zlsh80826 authored Oct 19, 2022
1 parent be273ea commit 1a14d01
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 9 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 32]).astype(np.float32)

for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
Expand Down Expand Up @@ -128,7 +128,7 @@ def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 32, 64]).astype(np.float32)

for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1, 2]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
Expand Down Expand Up @@ -166,8 +166,8 @@ def sample_predictor_configs(

def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 8, 8]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 64, 768]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 256]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 32, 64]}

def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
Expand Down Expand Up @@ -226,7 +226,7 @@ def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 8, 8, 8]).astype(np.float32)

for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1, 2, 3]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
Expand Down Expand Up @@ -264,7 +264,7 @@ def sample_predictor_configs(

def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64, 64]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 32, 32]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}

def clear_dynamic_shape():
Expand Down Expand Up @@ -294,6 +294,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):

# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
Expand Down Expand Up @@ -323,7 +324,7 @@ def sample_program_configs(self):
def generate_input(batch):
return np.random.random([batch, 8, 8, 8]).astype(np.float32)

for batch in [1, 2, 4]:
for batch in [1, 4]:
for axis in [0, 1, 2, 3, 4]:
for type in ["flatten", "flatten2"]:
if type == "flatten":
Expand Down Expand Up @@ -361,7 +362,7 @@ def sample_predictor_configs(

def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 4, 4, 4]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 32, 64, 64]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 16, 16, 8]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 16, 16, 8]}

def clear_dynamic_shape():
Expand Down Expand Up @@ -391,6 +392,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):

# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ def sample_program_config(self, draw):

def test(self):
self.run_and_statis(quant=False,
max_examples=50,
max_examples=25,
passes=["trt_squeeze2_matmul_fuse_pass"])


Expand Down

0 comments on commit 1a14d01

Please sign in to comment.