[AutoScan] Add a lot of 0D test #10243

Merged 9 commits on Apr 28, 2023
19 changes: 10 additions & 9 deletions lite/kernels/host/gaussian_random_compute.cc
@@ -55,7 +55,15 @@ void GaussRandomCompute::Run() {
float gstd = param.gauss_std;

// output shape
if (param.shape.size() > 0) {
if (param.ShapeTensorList.size() > 0) {
std::vector<int64_t> tmp{};
for (size_t i = 0; i < param.ShapeTensorList.size(); ++i) {
auto tmp_tensor_ptr = param.ShapeTensorList[i]->data<int>();
tmp.push_back(static_cast<int64_t>(tmp_tensor_ptr[0]));
}
DDimLite dims(tmp);
param.Out->Resize(dims);
} else if (param.shape.size() > 0) {
DDimLite dims(param.shape);
param.Out->Resize(dims);
} else if (param.ShapeTensor != nullptr) {
@@ -67,15 +75,8 @@ void GaussRandomCompute::Run() {
}
DDimLite dims(tmp);
param.Out->Resize(dims);
} else if (param.ShapeTensorList.size() > 0) {
std::vector<int64_t> tmp{};
for (size_t i = 0; i < param.ShapeTensorList.size(); ++i) {
auto tmp_tensor_ptr = param.ShapeTensorList[i]->data<int>();
tmp.push_back(static_cast<int64_t>(tmp_tensor_ptr[0]));
}
DDimLite dims(tmp);
param.Out->Resize(dims);
}

auto data = param.Out->mutable_data<float>();
int size = param.Out->numel();
std::normal_distribution<float> dist(mean, gstd);
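
For reference, the reordering above changes which shape source wins when several are set: ShapeTensorList is now consulted first, then the shape attribute, then ShapeTensor. An illustrative standalone Python sketch of that precedence (a pseudo-model of the branch order, not Paddle Lite code; tensor inputs are mocked as plain lists):

def resolve_output_shape(shape_attr, shape_tensor, shape_tensor_list):
    # Mirrors the new branch order: a non-empty ShapeTensorList wins,
    # then the shape attribute, then ShapeTensor.
    if shape_tensor_list:
        return [int(t[0]) for t in shape_tensor_list]
    if shape_attr:
        return list(shape_attr)
    if shape_tensor is not None:
        return [int(v) for v in shape_tensor]
    return []

print(resolve_output_shape([2, 3], None, [[4], [5]]))  # [4, 5]: tensor list wins
print(resolve_output_shape([2, 3], None, []))          # [2, 3]: falls back to shape
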
2 changes: 1 addition & 1 deletion lite/operators/flatten_op.cc
@@ -61,7 +61,7 @@ bool FlattenOp::InferShapeImpl() const {
out_shape[1] = inner;

param_.output->Resize(out_shape);
if (x_dims[0] == out_shape[0]) {
if (x_dims.size() > 0 && x_dims[0] == out_shape[0]) {
param_.output->set_lod(param_.x->lod());
}
return true;
13 changes: 2 additions & 11 deletions lite/operators/tile_op.cc
@@ -57,13 +57,9 @@ bool TileOp::InferShapeImpl() const {
repeat_times = param_.repeat_times;
}
param_.repeat_times = repeat_times;
if (repeat_times.size() == 0) {
if (repeat_times.size() == 0 && x_dims.size() > 0) {
repeat_times = std::vector<int>(x_dims.size(), -1);
}
CHECK_GE(x_dims.size(), 1)
<< "The rank of the input 'x' for tile op "
<< "must be positive integers, but the value received is "
<< x_dims.size();

CHECK_LE(x_dims.size(), 6)
<< "The rank of the input 'x' for tile op "
@@ -75,11 +71,6 @@ bool TileOp::InferShapeImpl() const {
<< "must not be greater than 6, but the value received is "
<< repeat_times.size();

CHECK_GE(repeat_times.size(), 1)
<< "The size of the shape of input 'repeat_times' for tile op "
<< "must be positive integers, but the value received is "
<< repeat_times.size();

auto out_rank =
std::max(static_cast<size_t>(x_dims.size()), repeat_times.size());

@@ -104,7 +95,7 @@ bool TileOp::InferShapeImpl() const {
}
}
out->Resize(out_shape);
if (out_shape[0] == x_dims[0]) {
if (out_shape.size() > 0 && x_dims.size() > 0 && out_shape[0] == x_dims[0]) {
param_.X->set_lod(param_.Out->lod());
}
return true;
6 changes: 3 additions & 3 deletions lite/tests/unittest_py/model_test/run_model_test.py
@@ -95,15 +95,15 @@
os.getcwd(), config[URL], config[MODEL_NAME],
config[FILE_NAME], input_info_str)
elif args.target == "ARM":
command = "python3.8 {}/model_test_base.py --target=ARM --url={} --model_name={} --file_name={} {}".format(
command = "python3.9 {}/model_test_base.py --target=ARM --url={} --model_name={} --file_name={} {}".format(
os.getcwd(), config[URL], config[MODEL_NAME],
config[FILE_NAME], input_info_str)
elif args.target == "OpenCL":
command = "python3.8 {}/model_test_base.py --target=OpenCL --url={} --model_name={} --file_name={} {}".format(
command = "python3.9 {}/model_test_base.py --target=OpenCL --url={} --model_name={} --file_name={} {}".format(
os.getcwd(), config[URL], config[MODEL_NAME],
config[FILE_NAME], input_info_str)
elif args.target == "Metal":
command = "python3.8 {}/model_test_base.py --target=Metal --url={} --model_name={} --file_name={} {}".format(
command = "python3.9 {}/model_test_base.py --target=Metal --url={} --model_name={} --file_name={} {}".format(
os.getcwd(), config[URL], config[MODEL_NAME],
config[FILE_NAME], input_info_str)
print(command)
16 changes: 13 additions & 3 deletions lite/tests/unittest_py/op/test_abs_op.py
@@ -61,10 +61,11 @@ def is_program_valid(self,
return True

def sample_program_configs(self, draw):
in_shape = draw(
in_shape_tmp = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=1, max_size=4))
in_shape = draw(st.sampled_from([in_shape_tmp, []]))

abs_op = OpConfig(
type="abs",
@@ -82,10 +83,19 @@ def sample_predictor_configs(self):
return self.get_predictor_configs(), ["abs"], (1e-5, 1e-5)

def add_ignore_pass_case(self):
pass
def _teller1(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data"].shape)
if target_type != TargetType.ARM and target_type != TargetType.Host:
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, min_success_num=25, max_examples=25)
self.run_and_statis(quant=False, min_success_num=25, max_examples=100)


if __name__ == "__main__":
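
For context, a standalone NumPy check (illustrative, not part of this PR) of what the newly sampled [] shape produces: a 0-D array, which is exactly what the added _teller1 detects via len(in_x_shape) == 0.

import numpy as np

# An empty shape list yields a 0-D array: shape () and ndim 0.
x = np.random.random([]).astype(np.float32)
print(x.shape, x.ndim)  # () 0
print(np.abs(x).shape)  # still () after abs
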
54 changes: 37 additions & 17 deletions lite/tests/unittest_py/op/test_acos_op.py
@@ -60,25 +60,36 @@ def generate_data(*args, **kwargs):
high = kwargs["high"]
if "dtype" in kwargs:
dtype = kwargs["dtype"]
if shape == []:
if dtype == "int32":
return np.ones(shape).astype(np.int32)
elif dtype == "int64":
return np.ones(shape).astype(np.int64)
elif dtype == "float32":
return np.random.random(shape).astype(np.float32)
else:
if dtype == "int32":
if low == high:
return low * np.ones(shape).astype(np.int32)
else:
return np.random.randint(low, high,
shape).astype(np.int32)
elif dtype == "int64":
if low == high:
return low * np.ones(shape).astype(np.int64)
else:
return np.random.randint(low, high,
shape).astype(np.int64)
elif dtype == "float32":
return (
high - low
) * np.random.random(shape).astype(np.float32) + low

if dtype == "int32":
if low == high:
return low * np.ones(shape).astype(np.int32)
else:
return np.random.randint(low, high, shape).astype(np.int32)
elif dtype == "int64":
if low == high:
return low * np.ones(shape).astype(np.int64)
else:
return np.random.randint(low, high, shape).astype(np.int64)
elif dtype == "float32":
return (high - low
) * np.random.random(shape).astype(np.float32) + low

in_shape = draw(
in_shape_tmp = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=4, max_size=4))
in_shape = draw(st.sampled_from([in_shape_tmp, []]))

acos_op = OpConfig(
type="acos",
@@ -99,10 +110,19 @@ def sample_predictor_configs(self):
return self.get_predictor_configs(), ["acos"], (1e-5, 1e-5)

def add_ignore_pass_case(self):
pass
def _teller1(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data"].shape)
if target_type != TargetType.ARM and target_type != TargetType.Host:
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
self.run_and_statis(quant=False, max_examples=100)


if __name__ == "__main__":
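
Stripped of the AutoScan plumbing, the reworked generate_data helper can be exercised standalone; the following is an illustrative condensed sketch (not part of this PR) keeping only the 0-D short-circuit and the original low/high handling:

import numpy as np

def generate_data(shape, dtype="float32", low=0, high=1):
    # Empty shape: fall back to ones()/random(), giving a 0-D array.
    if shape == []:
        if dtype in ("int32", "int64"):
            return np.ones(shape).astype(dtype)
        return np.random.random(shape).astype(np.float32)
    # Non-empty shape: unchanged low/high handling.
    if dtype in ("int32", "int64"):
        if low == high:
            return (low * np.ones(shape)).astype(dtype)
        return np.random.randint(low, high, shape).astype(dtype)
    return ((high - low) * np.random.random(shape) + low).astype(np.float32)

print(generate_data([]).shape)                     # () -> 0-D acos input
print(generate_data([2, 3], "int32", 1, 1).shape)  # (2, 3), all ones
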
16 changes: 15 additions & 1 deletion lite/tests/unittest_py/op/test_argsort_op.py
@@ -47,6 +47,10 @@ def sample_program_configs(self, draw):
axis = draw(st.integers(min_value=-1, max_value=3))
assume(axis < len(in_shape))

in_shape = draw(st.sampled_from([in_shape]))
if in_shape == []:
axis = 0

arg_sort_op = OpConfig(
type="argsort",
inputs={"X": ["input_data"]},
@@ -64,7 +68,17 @@ def sample_predictor_configs(self):
return self.get_predictor_configs(), ["argsort"], (1e-5, 1e-5)

def add_ignore_pass_case(self):
pass
def _teller1(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data"].shape)
#if target_type != TargetType.ARM and target_type != TargetType.Host:
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(
_teller1, IgnoreReasons.PADDLE_NOT_SUPPORT,
"ValueError: (InvalidArgument) axis(0) must be less than num_dims(0) in Paddle2.4."
)

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
53 changes: 37 additions & 16 deletions lite/tests/unittest_py/op/test_asin_op.py
@@ -61,24 +61,36 @@ def generate_data(*args, **kwargs):
if "dtype" in kwargs:
dtype = kwargs["dtype"]

if dtype == "int32":
if low == high:
return low * np.ones(shape).astype(np.int32)
else:
return np.random.randint(low, high, shape).astype(np.int32)
elif dtype == "int64":
if low == high:
return low * np.ones(shape).astype(np.int64)
else:
return np.random.randint(low, high, shape).astype(np.int64)
elif dtype == "float32":
return (high - low
) * np.random.random(shape).astype(np.float32) + low
if shape == []:
if dtype == "int32":
return np.ones(shape).astype(np.int32)
elif dtype == "int64":
return np.ones(shape).astype(np.int64)
elif dtype == "float32":
return np.random.random(shape).astype(np.float32)
else:
if dtype == "int32":
if low == high:
return low * np.ones(shape).astype(np.int32)
else:
return np.random.randint(low, high,
shape).astype(np.int32)
elif dtype == "int64":
if low == high:
return low * np.ones(shape).astype(np.int64)
else:
return np.random.randint(low, high,
shape).astype(np.int64)
elif dtype == "float32":
return (
high - low
) * np.random.random(shape).astype(np.float32) + low

in_shape = draw(
in_shape_tmp = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=4, max_size=4))
in_shape = draw(st.sampled_from([in_shape_tmp, []]))

asin_op = OpConfig(
type="asin",
@@ -99,10 +111,19 @@ def sample_predictor_configs(self):
return self.get_predictor_configs(), ["asin"], (1e-5, 1e-5)

def add_ignore_pass_case(self):
pass
def _teller1(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data"].shape)
if target_type != TargetType.ARM and target_type != TargetType.Host:
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
self.run_and_statis(quant=False, max_examples=100)


if __name__ == "__main__":
13 changes: 12 additions & 1 deletion lite/tests/unittest_py/op/test_assign_op.py
@@ -47,7 +47,7 @@ def sample_program_configs(self, draw):
in_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=1, max_size=4))
min_value=1, max_value=8), min_size=0, max_size=4))
assign_op = OpConfig(
type="assign",
inputs={"X": ["input_data"]},
@@ -74,6 +74,17 @@ def teller1(program_config, predictor_config):
teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support 'in_shape_size == 1' on nvidia_tensorrt.")

def _teller2(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data"].shape)
if target_type != TargetType.ARM and target_type != TargetType.Host:
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller2,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)

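
Note that assign reaches the 0-D case differently from abs/acos/asin/atan: it lets min_size drop to 0 rather than sampling [] explicitly. An illustrative standalone Hypothesis sketch (not part of this PR) contrasting the two strategies:

import hypothesis.strategies as st
from hypothesis import given, settings

# assign-style: the drawn list itself may be empty.
shape_via_min_size = st.lists(st.integers(1, 8), min_size=0, max_size=4)
# abs/acos-style: draw a normal shape, then pick between it and [].
shape_via_sampled_from = st.lists(
    st.integers(1, 8), min_size=1, max_size=4).flatmap(
        lambda s: st.sampled_from([s, []]))

@settings(max_examples=50)
@given(a=shape_via_min_size, b=shape_via_sampled_from)
def check_shapes(a, b):
    # Both strategies can produce [], i.e. a 0-D tensor shape.
    assert all(1 <= d <= 8 for d in a) and all(1 <= d <= 8 for d in b)

check_shapes()
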
16 changes: 13 additions & 3 deletions lite/tests/unittest_py/op/test_atan_op.py
@@ -49,10 +49,11 @@ def is_program_valid(self,
return True

def sample_program_configs(self, draw):
in_shape = draw(
in_shape_tmp = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=4, max_size=4))
in_shape = draw(st.sampled_from([in_shape_tmp, []]))

atan_op = OpConfig(
type="atan",
@@ -70,10 +71,19 @@ def sample_predictor_configs(self):
return self.get_predictor_configs(), ["atan"], (1e-5, 1e-5)

def add_ignore_pass_case(self):
pass
def _teller1(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data"].shape)
if target_type != TargetType.ARM and target_type != TargetType.Host:
if len(in_x_shape) == 0:
return True

self.add_ignore_check_case(_teller1,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Only test 0D-tensor on CPU(ARM/Host) now.")

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
self.run_and_statis(quant=False, max_examples=100)


if __name__ == "__main__":
10 changes: 1 addition & 9 deletions lite/tests/unittest_py/op/test_batch_norm_op.py
@@ -162,15 +162,7 @@ def sample_predictor_configs(self):
return self.get_predictor_configs(), ["batch_norm"], (atol, rtol)

def add_ignore_pass_case(self):
def _teller7(program_config, predictor_config):
target_type = predictor_config.target()
if target_type == TargetType.Metal:
return True

self.add_ignore_check_case(
_teller7, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Metal error: failed assertion commit command buffer with uncommitted encoder"
)
pass

def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=250)