diff --git a/test/cpp/inference/api/trt_dynamic_shape_test.cc b/test/cpp/inference/api/trt_dynamic_shape_test.cc
index 80929f10447b8..52336e7e8a541 100644
--- a/test/cpp/inference/api/trt_dynamic_shape_test.cc
+++ b/test/cpp/inference/api/trt_dynamic_shape_test.cc
@@ -191,6 +191,7 @@ void TestTunedDynamic() {
     output_t->copy_to_cpu(out_data.data());
   };
   check_func(predictor_tuned.get());
+  predictor_tuned.reset(nullptr);
 
   // check tuned_dynamic_shape
   AnalysisConfig config;
diff --git a/test/ir/inference/test_trt_convert_lookup_table.py b/test/ir/inference/test_trt_convert_lookup_table.py
index e1fb64bcdf545..b7cf7d657d7a0 100644
--- a/test/ir/inference/test_trt_convert_lookup_table.py
+++ b/test/ir/inference/test_trt_convert_lookup_table.py
@@ -80,6 +80,7 @@ def generate_input2(dims, attrs: List[Dict[str, Any]]):
                     )
                 },
                 outputs=["out_data"],
+                no_cast_list=["indices"],
             )
 
             yield program_config
diff --git a/test/ir/inference/test_trt_convert_solve.py b/test/ir/inference/test_trt_convert_solve.py
index fa86a84e61f19..de70cfacc4e07 100644
--- a/test/ir/inference/test_trt_convert_solve.py
+++ b/test/ir/inference/test_trt_convert_solve.py
@@ -90,7 +90,7 @@ def clear_dynamic_shape():
         yield self.create_inference_config(), (1, 3), 1e-5
 
         self.trt_param.precision = paddle_infer.PrecisionType.Half
-        yield self.create_inference_config(), (1, 3), 1e-3
+        yield self.create_inference_config(), (1, 3), (1e-3, 1e-3)
 
     def test(self):
         self.run_test()
diff --git a/test/legacy_test/test_sparse_fused_attention_op.py b/test/legacy_test/test_sparse_fused_attention_op.py
index 68cdd16d4bd12..098f4815b85f3 100644
--- a/test/legacy_test/test_sparse_fused_attention_op.py
+++ b/test/legacy_test/test_sparse_fused_attention_op.py
@@ -42,6 +42,7 @@ def get_cuda_version():
 )
 class TestSparseAttentionAPI1(unittest.TestCase):
     def setUp(self):
+        paddle.seed(0)
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 128
@@ -134,6 +135,7 @@ def test_dygraph(self):
 
 class TestSparseAttentionAPI2(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 128
@@ -144,6 +146,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI3(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
@@ -154,6 +157,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI4(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
@@ -164,6 +168,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI5(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
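
Note (editor's sketch, not part of the patch): the test_sparse_fused_attention_op.py hunks apply a common test-determinism pattern — seed the RNG once in the base class's setUp and have every subclass chain up via super().setUp(), so each test variant starts from the same RNG state and its randomly generated inputs are reproducible. A minimal self-contained illustration follows; the class names and the seq_len attribute are hypothetical stand-ins, not taken from the patch:

    import unittest

    import paddle


    class BaseCase(unittest.TestCase):  # stands in for TestSparseAttentionAPI1
        def setUp(self):
            paddle.seed(0)  # fix the RNG so random test inputs are reproducible
            self.seq_len = 128


    class VariantCase(BaseCase):  # stands in for TestSparseAttentionAPI2..5
        def setUp(self):
            super().setUp()  # without this call, paddle.seed(0) never runs here
            self.seq_len = 512  # override only what differs from the base case

Before the patch, each subclass's setUp shadowed the base method without calling it, so the seed would have been silently skipped. The test_trt_convert_solve.py hunk presumably follows a related harness convention, passing the FP16 tolerance as an (atol, rtol) pair instead of a single scalar.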