
Merge pull request #148 from PINTO0309/h5_export_fix
Fixed Abort problem on Keras (.h5) output when using `np.dtype`
PINTO0309 authored Jan 23, 2023
2 parents 399af68 + e9bca6b commit 51ba3b4
Showing 28 changed files with 184 additions and 68 deletions.
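The changes below all apply the same pattern: dtype values coming out of the ONNX graph may be `np.dtype` instances, and the fix maps them through `NUMPY_DTYPES_TO_TF_DTYPES` to a `tf.DType` before they reach a TensorFlow op, which the commit message ties to the abort seen when writing Keras (.h5) output. The sketch below illustrates the guard with a reduced, hypothetical mapping table and a helper name (`to_tf_dtype`) that does not exist in the repository; onnx2tf inlines the conditional expression at each call site instead.

```python
# Minimal sketch of the dtype guard this commit applies, using a reduced
# mapping table; onnx2tf's real NUMPY_DTYPES_TO_TF_DTYPES covers more dtypes.
import numpy as np
import tensorflow as tf

NUMPY_DTYPES_TO_TF_DTYPES = {
    np.dtype('float32'): tf.float32,
    np.dtype('float64'): tf.float64,
    np.dtype('int32'): tf.int32,
    np.dtype('int64'): tf.int64,
}

def to_tf_dtype(dtype):
    """Return a tf.DType; np.dtype instances are mapped, anything else passes through."""
    return NUMPY_DTYPES_TO_TF_DTYPES[dtype] if isinstance(dtype, np.dtype) else dtype

x = tf.constant([[3.0, 1.0, 2.0]])
onnx_dtype = np.dtype('int64')   # dtype as it typically arrives from the ONNX graph

# Pass the mapped tf.DType instead of the raw np.dtype (the raw form is what
# the commit message associates with the Keras .h5 export abort).
indices = tf.math.argmax(x, axis=-1, output_type=to_tf_dtype(onnx_dtype))
print(indices.dtype)  # <dtype: 'int64'>
```

The `isinstance(dtype, np.dtype)` check lets values that are already `tf.DType` objects pass through unchanged, which is why the same one-line guard can be dropped in front of each op in the files below.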
2 changes: 1 addition & 1 deletion README.md
@@ -89,7 +89,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
$ docker run --rm -it \
-v `pwd`:/workdir \
-w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.5.29
+ghcr.io/pinto0309/onnx2tf:1.5.30
or
2 changes: 1 addition & 1 deletion onnx2tf/__init__.py
@@ -1,3 +1,3 @@
from onnx2tf.onnx2tf import convert, main

-__version__ = '1.5.29'
+__version__ = '1.5.30'
10 changes: 7 additions & 3 deletions onnx2tf/ops/ArgMax.py
@@ -16,6 +16,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -114,7 +115,8 @@ def make_node(
argmaxed_tensor = tf.math.argmax(
input=reversed_tensor,
axis=axis,
-output_type=dtype,
+output_type=NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
+    if isinstance(dtype, np.dtype) else dtype,
name=f'{graph_node.name}_argmax',
)
if keepdims:
@@ -146,7 +148,8 @@ def make_node(
input_tensor=reversed_tensor,
original_shape=graph_node.inputs[0].shape,
axis=axis,
-output_type=dtype,
+output_type=NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
+    if isinstance(dtype, np.dtype) else dtype,
keepdims=keepdims,
replace_argmax_to_fused_argmax_and_indicies_is_int64=replace_argmax_to_fused_argmax_and_indicies_is_int64,
replace_argmax_to_fused_argmax_and_indicies_is_float32=replace_argmax_to_fused_argmax_and_indicies_is_float32,
@@ -156,7 +159,8 @@
argmaxed_tensor = tf.math.argmax(
input=reversed_tensor,
axis=axis,
-output_type=dtype,
+output_type=NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
+    if isinstance(dtype, np.dtype) else dtype,
name=f'{graph_node.name}_argmax',
)
if keepdims:
4 changes: 3 additions & 1 deletion onnx2tf/ops/ArgMin.py
@@ -14,6 +14,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -96,7 +97,8 @@ def make_node(
argmined_tensor = tf.math.argmin(
input=reversed_tensor,
axis=axis,
-output_type=dtype,
+output_type=NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
+    if isinstance(dtype, np.dtype) else dtype,
name=f'{graph_node.name}_argmin',
)
if keepdims:
15 changes: 11 additions & 4 deletions onnx2tf/ops/Clip.py
@@ -13,6 +13,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -120,6 +121,8 @@ def make_node(
max_value = np.asarray([max_value])

tf_op_type = None
+features_dtype = NUMPY_DTYPES_TO_TF_DTYPES[features.dtype] \
+    if isinstance(features.dtype, np.dtype) else features.dtype
if (isinstance(min_value, np.ndarray) or isinstance(min_value, float)) and min_value == 0.0 \
and (isinstance(max_value, np.ndarray) or isinstance(max_value, float)) and max_value == 6.0:
tf_layers_dict[graph_node_output.name]['tf_node'] = \
@@ -136,24 +139,28 @@ def make_node(
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.clip_by_value(
t=features,
-clip_value_min=min_value,
-clip_value_max=max_value,
+clip_value_min=tf.convert_to_tensor(min_value, dtype=features_dtype) \
+    if isinstance(min_value, np.ndarray) else min_value,
+clip_value_max=tf.convert_to_tensor(max_value, dtype=features_dtype) \
+    if isinstance(max_value, np.ndarray) else max_value,
)
tf_op_type = tf.clip_by_value
elif (isinstance(min_value, np.ndarray) and min_value.shape is not None) \
and (max_value is None or max_value.shape is None):
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.maximum(
x=features,
-y=min_value,
+y=tf.convert_to_tensor(min_value, dtype=features_dtype) \
+    if isinstance(min_value, np.ndarray) else min_value,
)
tf_op_type = tf.maximum
elif (min_value is None or min_value.shape is None) \
and (max_value is not None and max_value.shape is not None):
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.minimum(
x=features,
-y=max_value,
+y=tf.convert_to_tensor(max_value, dtype=features_dtype) \
+    if isinstance(max_value, np.ndarray) else max_value,
)
tf_op_type = tf.minimum

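Besides mapping `features.dtype`, the Clip change wraps `np.ndarray` bounds in `tf.convert_to_tensor(..., dtype=features_dtype)` so the min/max operands carry the same dtype as the clipped tensor. A minimal sketch of that idea with made-up bound values, assuming the features are already a `tf.Tensor` (in the converter they can also be numpy constants, which is what the `NUMPY_DTYPES_TO_TF_DTYPES` guard above handles):

```python
# Sketch of the Clip bound handling above, with hypothetical values.
import numpy as np
import tensorflow as tf

features = tf.constant([-3.0, 0.5, 7.0], dtype=tf.float32)
min_value = np.asarray([0.0], dtype=np.float64)   # bounds often arrive as numpy arrays
max_value = np.asarray([6.0], dtype=np.float64)

features_dtype = features.dtype  # already a tf.DType on a tf.Tensor

clipped = tf.clip_by_value(
    t=features,
    # Converting the numpy bounds with an explicit dtype keeps them aligned
    # with the float32 features instead of staying float64.
    clip_value_min=tf.convert_to_tensor(min_value, dtype=features_dtype),
    clip_value_max=tf.convert_to_tensor(max_value, dtype=features_dtype),
)
print(clipped.numpy())  # [0.  0.5 6. ]
```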
5 changes: 1 addition & 4 deletions onnx2tf/ops/Constant.py
@@ -5,10 +5,7 @@
import tensorflow as tf
from onnx import numpy_helper
import onnx_graphsurgeon as gs
-from onnx2tf.utils.enums import (
-    ONNX_DTYPES_TO_TF_DTYPES,
-    NUMPY_DTYPES_TO_TF_DTYPES,
-)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES
from onnx2tf.utils.common_functions import (
print_node_info,
make_tf_node_info,
8 changes: 6 additions & 2 deletions onnx2tf/ops/EyeLike.py
@@ -13,6 +13,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -71,6 +72,9 @@ def make_node(
'dtype': dtype,
}

+output_dtype = NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
+    if isinstance(dtype, np.dtype) else dtype
+
# Generation of TF OP
if None not in input_tensor_shape:
max_eye_shape_ub = input_tensor_shape[1] \
@@ -84,7 +88,7 @@ def make_node(
tensor = tf.eye(
eye_shape,
num_columns=eye_shape,
-dtype=dtype,
+dtype=output_dtype,
)
if offset > 0:
tb_paddings = [
@@ -129,7 +133,7 @@ def create_nodes(inp, offset):
tensor = tf.eye(
eye_shape,
num_columns=eye_shape,
-dtype=dtype,
+dtype=output_dtype,
)
if offset > 0:
tb_paddings = [
15 changes: 12 additions & 3 deletions onnx2tf/ops/Gather.py
@@ -15,6 +15,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -73,12 +74,20 @@ def make_node(
kwargs['optimization_for_gpu_delegate']

# tensorflow gather supports only positive indices
+out_dtype = NUMPY_DTYPES_TO_TF_DTYPES[indices.dtype] \
+    if isinstance(indices.dtype, np.dtype) else indices.dtype
if not optimization_for_gpu_delegate:
-cond = tf.cast(indices < 0, dtype=indices.dtype)
-indices = tf.cast(indices + cond * tf.shape(input_tensor, out_type=indices.dtype)[axis], dtype=indices.dtype)
+cond = tf.cast(indices < 0, dtype=out_dtype)
+indices = tf.cast(
+    indices + cond * tf.shape(input_tensor, out_type=out_dtype)[axis],
+    dtype=out_dtype,
+)
else:
cond = indices < 0
-indices = indices + tf.cast(tf.where(cond, 1, 0) * tf.shape(input_tensor, out_type=indices.dtype)[axis], dtype=indices.dtype)
+indices = indices + tf.cast(
+    tf.where(cond, 1, 0) * tf.shape(input_tensor, out_type=out_dtype)[axis],
+    dtype=out_dtype,
+)

# Preserving Graph Structure (Dict)
tf_layers_dict[graph_node_output.name] = {
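The Gather change keeps the existing negative-index normalization but routes every cast and `tf.shape` call through the mapped `out_dtype`. A self-contained sketch of that normalization with made-up inputs (here `indices.dtype` is already a `tf.DType`, so the mapping step would be a no-op):

```python
# Sketch of the negative-index normalization used above, with made-up inputs.
import numpy as np
import tensorflow as tf

input_tensor = tf.constant([[10, 11, 12], [20, 21, 22]])
indices = tf.constant([0, -1], dtype=tf.int64)   # ONNX Gather allows negative indices
axis = 1

# The converter maps np.dtype via NUMPY_DTYPES_TO_TF_DTYPES; this is already a tf.DType.
out_dtype = indices.dtype

# Shift negative indices by the axis size, as the code above does, so that
# tf.gather only ever sees non-negative indices.
cond = tf.cast(indices < 0, dtype=out_dtype)
indices = tf.cast(
    indices + cond * tf.shape(input_tensor, out_type=out_dtype)[axis],
    dtype=out_dtype,
)

print(tf.gather(input_tensor, indices, axis=axis).numpy())  # columns 0 and 2: [[10 12], [20 22]]
```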
7 changes: 6 additions & 1 deletion onnx2tf/ops/GatherND.py
@@ -17,6 +17,7 @@
post_process_transpose,
)
from onnx2tf.utils.colors import Color
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -134,7 +135,11 @@ def make_node(
)
mul = tf.math.multiply(
indices_tensor,
-tf.cast(axis_step, dtype=indices_tensor.dtype),
+tf.cast(
+    axis_step,
+    dtype=NUMPY_DTYPES_TO_TF_DTYPES[indices_tensor.dtype] \
+        if isinstance(indices_tensor.dtype, np.dtype) else indices_tensor.dtype,
+),
)
indices_flat = tf.reduce_sum(
mul,
4 changes: 3 additions & 1 deletion onnx2tf/ops/Gemm.py
@@ -14,6 +14,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -87,7 +88,8 @@ def make_node(
**kwargs,
)

-input_tensor_x_dtype = x.dtype
+input_tensor_x_dtype = NUMPY_DTYPES_TO_TF_DTYPES[x.dtype] \
+    if isinstance(x.dtype, np.dtype) else x.dtype
x = tf.keras.layers.Flatten()(x)
# The Flatten API changes data type from tf.float64 to tf.float32
# so we need the following line to get the original type back
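The Gemm change records the input dtype as a `tf.DType` before flattening because, as the in-file comment notes, `tf.keras.layers.Flatten` comes back as float32 and the original type has to be cast back afterwards. A small sketch of that round-trip, assuming the default Keras float32 policy the comment describes is in effect:

```python
# Sketch of preserving the input dtype across tf.keras.layers.Flatten,
# following the Gemm comment above; values are made up.
import numpy as np
import tensorflow as tf

x = tf.constant(np.ones((2, 3, 4), dtype=np.float64))
input_tensor_x_dtype = x.dtype            # tf.float64 (mapped first if it were an np.dtype)

flat = tf.keras.layers.Flatten()(x)       # Keras layers compute in float32 by default
restored = tf.cast(flat, input_tensor_x_dtype)  # cast back to the recorded dtype

print(flat.dtype, restored.dtype, restored.shape)
# <dtype: 'float32'> <dtype: 'float64'> (2, 12)
```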
4 changes: 3 additions & 1 deletion onnx2tf/ops/Hardmax.py
@@ -14,6 +14,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -121,7 +122,8 @@ def make_node(
axis=-1,
),
depth=depth,
-dtype=x.dtype,
+dtype=NUMPY_DTYPES_TO_TF_DTYPES[x.dtype] \
+    if isinstance(x.dtype, np.dtype) else x.dtype,
name=graph_node.name,
)

6 changes: 4 additions & 2 deletions onnx2tf/ops/If.py
@@ -86,7 +86,8 @@ def make_node(
tf_layers_dict[output.name]['tf_node'] = \
tf.constant(
output.values,
-dtype=NUMPY_DTYPES_TO_TF_DTYPES[output.values.dtype],
+dtype=NUMPY_DTYPES_TO_TF_DTYPES[output.values.dtype] \
+    if isinstance(output.values.dtype, np.dtype) else output.values.dtype,
)
then_branch_ops = []
for then_branch_graph_output in then_branch_graph_outputs:
@@ -127,7 +128,8 @@ def make_node(
tf_layers_dict[output.name]['tf_node'] = \
tf.constant(
output.values,
-dtype=NUMPY_DTYPES_TO_TF_DTYPES[output.values.dtype],
+dtype=NUMPY_DTYPES_TO_TF_DTYPES[output.values.dtype] \
+    if isinstance(output.values.dtype, np.dtype) else output.values.dtype,
)
else_branch_ops = []
for else_branch_graph_output in else_branch_graph_outputs:
8 changes: 6 additions & 2 deletions onnx2tf/ops/MatMul.py
@@ -14,6 +14,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -82,12 +83,15 @@ def make_node(
**kwargs,
)

+output_dtype = NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
+    if isinstance(dtype, np.dtype) else dtype
+
try:
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.matmul(
a=input_tensor_1,
b=input_tensor_2,
-output_type=dtype,
+output_type=output_dtype,
name=graph_node.name,
)
except Exception as ex1:
@@ -102,7 +106,7 @@ def make_node(
tf.matmul(
a=tf.transpose(a=input_tensor_1, perm=tensor_1_candidate_for_transposition),
b=tf.transpose(a=input_tensor_2, perm=tensor_2_candidate_for_transposition),
-output_type=dtype,
+output_type=output_dtype,
name=graph_node.name,
)
break
6 changes: 5 additions & 1 deletion onnx2tf/ops/MatMulInteger.py
@@ -13,6 +13,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -125,11 +126,14 @@ def get_a_zero_point(a_zero_point):
b_zero_point = tf.cast(b_zero_point, tf.int32)
casted_input_tensor_2 = tf.subtract(casted_input_tensor_2, b_zero_point)

+output_dtype = NUMPY_DTYPES_TO_TF_DTYPES[dtype] \
+    if isinstance(dtype, np.dtype) else dtype
+
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.matmul(
a=casted_input_tensor_1,
b=casted_input_tensor_2,
-output_type=dtype,
+output_type=output_dtype,
name=graph_node.name,
)

7 changes: 6 additions & 1 deletion onnx2tf/ops/Neg.py
@@ -13,6 +13,7 @@
pre_process_transpose,
post_process_transpose,
)
+from onnx2tf.utils.enums import NUMPY_DTYPES_TO_TF_DTYPES


@print_node_info
@@ -88,7 +89,11 @@ def make_node(
tf_layers_dict[graph_node_output.name]['tf_node'] = \
tf.math.multiply(
x=input_tensor,
-y=tf.cast(-1, dtype=input_tensor.dtype),
+y=tf.cast(
+    -1,
+    dtype=NUMPY_DTYPES_TO_TF_DTYPES[input_tensor.dtype] \
+        if isinstance(input_tensor.dtype, np.dtype) else input_tensor.dtype,
+),
name=graph_node.name,
)

24 changes: 18 additions & 6 deletions onnx2tf/ops/NonMaxSuppression.py
@@ -34,15 +34,27 @@ def non_max_suppression(
name=None,
):
with ops.name_scope(name, 'non_max_suppression'):
-iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold')
-score_threshold = ops.convert_to_tensor(
-    score_threshold, name='score_threshold')
selected_indices, num_valid = gen_image_ops.non_max_suppression_v4(
boxes=boxes,
scores=scores,
-max_output_size=max_output_size,
-iou_threshold=iou_threshold,
-score_threshold=score_threshold,
+max_output_size=max_output_size \
+    if not isinstance(max_output_size, np.ndarray) \
+    else tf.convert_to_tensor(
+        value=max_output_size,
+        name='max_output_size'
+    ),
+iou_threshold=iou_threshold \
+    if not isinstance(iou_threshold, np.ndarray) \
+    else tf.convert_to_tensor(
+        value=iou_threshold,
+        name='iou_threshold',
+    ),
+score_threshold=score_threshold \
+    if not isinstance(score_threshold, np.ndarray) \
+    else tf.convert_to_tensor(
+        value=score_threshold,
+        name='score_threshold',
+    ),
pad_to_max_output_size=False,
)
return selected_indices[:num_valid]
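In NonMaxSuppression the unconditional `ops.convert_to_tensor` calls are replaced by guards that only convert arguments that arrive as `np.ndarray`, leaving tensors and Python scalars untouched. The sketch below applies the same guard to the public `tf.image.non_max_suppression` API rather than the private `gen_image_ops.non_max_suppression_v4` the repository wraps, with made-up boxes and scores and a hypothetical `_as_tensor_if_ndarray` helper:

```python
# Sketch of the np.ndarray guard above, applied to the public NMS API;
# box/score values are made up for illustration.
import numpy as np
import tensorflow as tf

def _as_tensor_if_ndarray(value, name):
    """Convert numpy arrays to tensors; pass tensors / Python scalars through."""
    return tf.convert_to_tensor(value, name=name) if isinstance(value, np.ndarray) else value

boxes = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.9, 0.9]])
scores = tf.constant([0.9, 0.8])
max_output_size = np.asarray(10, dtype=np.int32)     # NMS parameters as ONNX constants
iou_threshold = np.asarray(0.5, dtype=np.float32)
score_threshold = np.asarray(0.1, dtype=np.float32)

selected = tf.image.non_max_suppression(
    boxes=boxes,
    scores=scores,
    max_output_size=_as_tensor_if_ndarray(max_output_size, 'max_output_size'),
    iou_threshold=_as_tensor_if_ndarray(iou_threshold, 'iou_threshold'),
    score_threshold=_as_tensor_if_ndarray(score_threshold, 'score_threshold'),
)
print(selected.numpy())  # [0]
```

With these example values the second box overlaps the first with IoU ≈ 0.81, so only index 0 survives the suppression.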
(The remaining 12 changed files are not shown in this view.)
