Commit

Merge pull request #229 from PINTO0309/imp_io
`-coion` / `--copy_onnx_input_output_names_to_tflite` now outputs the tflite with the signature embedded by default.
PINTO0309 authored Mar 5, 2023
2 parents a82108d + fa92a29 commit fa5673c
Showing 3 changed files with 57 additions and 27 deletions.
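For context, a minimal sketch of the new option in use (model filename hypothetical). With `-coion`, onnx2tf now embeds the ONNX input/output names and a `serving_default` signature in the generated tflite by default:

```
$ onnx2tf -i model.onnx -coion
```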
29 changes: 3 additions & 26 deletions README.md
@@ -45,7 +45,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
$ docker run --rm -it \
-v `pwd`:/workdir \
-w /workdir \
-ghcr.io/pinto0309/onnx2tf:1.7.17
+ghcr.io/pinto0309/onnx2tf:1.7.18

or

@@ -136,7 +136,7 @@ $ onnx2tf -i mobilenetv2-12.onnx -ois input:1,3,224,224 -cotof -cotoa 1e-1
![image](https://user-images.githubusercontent.com/33194443/216901668-5fdb1e38-8670-46a4-b4b9-8a774fa7545e.png)
![Kazam_screencast_00108_](https://user-images.githubusercontent.com/33194443/212460284-f3480105-4d94-4519-94dc-320d641f5647.gif)

-If you want to match tflite's input/output OP names and the order of input/output OPs with ONNX, you can use `interpreter.get_signature_runner()` to infer this after using the `-osd` / `--output_signaturedefs` option to output `saved_model`. This workaround has already been available since a much earlier version of onnx2tf. Ref: https://github.com/PINTO0309/onnx2tf/pull/185
+If you want to match tflite's input/output OP names and the order of input/output OPs with ONNX, you can use `interpreter.get_signature_runner()` to infer this after using the `-coion` / `--copy_onnx_input_output_names_to_tflite` option to output the tflite file. See: https://github.com/PINTO0309/onnx2tf/issues/228
```python
import torch
import onnxruntime
@@ -177,28 +177,10 @@ print("[ONNX] Model Predictions:", onnx_output)
onnx2tf.convert(
    input_onnx_file_path="model.onnx",
    output_folder_path="model.tf",
-    output_signaturedefs=True,
+    copy_onnx_input_output_names_to_tflite=True,
    non_verbose=True,
)

-# Let's check TensorFlow model
-tf_model = tf.saved_model.load("model.tf")
-tf_output = tf_model.signatures["serving_default"](
-    x=tf.constant((10,), dtype=tf.int64),
-    y=tf.constant((2,), dtype=tf.int64),
-)
-print("[TF] Model Predictions:", tf_output)
-# Rerun TFLite conversion but from saved model
-converter = tf.lite.TFLiteConverter.from_saved_model("model.tf")
-converter.target_spec.supported_ops = [
-    tf.lite.OpsSet.TFLITE_BUILTINS,
-    tf.lite.OpsSet.SELECT_TF_OPS,
-]
-tf_lite_model = converter.convert()
-with open("model.tf/model_float32.tflite", "wb") as f:
-    f.write(tf_lite_model)
# Now, test the newer TFLite model
interpreter = tf.lite.Interpreter(model_path="model.tf/model_float32.tflite")
tf_lite_model = interpreter.get_signature_runner()
@@ -224,11 +206,6 @@ print("[TFLite] Model Predictions:", tt_lite_output)
    array(12, dtype=int64),
    array(8, dtype=int64)
]
-[TF] Model Predictions:
-{
-    'add': <tf.Tensor: shape=(1,), dtype=int64, numpy=array([12])>,
-    'sub': <tf.Tensor: shape=(1,), dtype=int64, numpy=array([8])>
-}
[TFLite] Model Predictions:
{
    'add': array([12]),
2 changes: 1 addition & 1 deletion onnx2tf/__init__.py
@@ -1,3 +1,3 @@
from onnx2tf.onnx2tf import convert, main

-__version__ = '1.7.17'
+__version__ = '1.7.18'
53 changes: 53 additions & 0 deletions onnx2tf/utils/common_functions.py
@@ -3538,6 +3538,59 @@ def rewrite_tflite_inout_opname(
    for idx, flat_output_info in enumerate(flat_output_infos):
        flat_output_info['name'] = onnx_output_names[idx]

+    # make signature_defs
+    """
+    "signature_defs": [
+      {
+        "inputs": [
+          {
+            "name": "input",
+            "tensor_index": 0
+          }
+        ],
+        "outputs": [
+          {
+            "name": "boxes",
+            "tensor_index": 208
+          },
+          {
+            "name": "scores",
+            "tensor_index": 190
+          }
+        ],
+        "signature_key": "serving_default",
+        "subgraph_index": 0
+      }
+    ]
+    """
+    signature_defs = {}
+    # signature_defs_inputs
+    signature_defs_inputs = []
+    for idx, flat_input_info in enumerate(flat_input_infos):
+        signature_defs_inputs.append(
+            {
+                'name': onnx_input_names[idx],
+                'tensor_index': flat_input_info['buffer'] - 1,
+            }
+        )
+    signature_defs['inputs'] = signature_defs_inputs
+    # signature_defs_outputs
+    signature_defs_outputs = []
+    for idx, flat_output_info in enumerate(flat_output_infos):
+        signature_defs_outputs.append(
+            {
+                'name': onnx_output_names[idx],
+                'tensor_index': flat_output_info['buffer'] - 1,
+            }
+        )
+    signature_defs['outputs'] = signature_defs_outputs
+    # signature_key
+    signature_defs['signature_key'] = 'serving_default'
+    # subgraph_index
+    signature_defs['subgraph_index'] = 0
+    # update json
+    flat_json['signature_defs'] = [signature_defs]

    if flat_json is not None:
        with open(json_file_path, 'w') as f:
            json.dump(flat_json, f)
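
With the signature embedded in the flatbuffer, a quick way to confirm the result is to read it back through the standard TF Lite signature APIs. A minimal verification sketch (file path hypothetical; the I/O names match the docstring example above):

```python
import tensorflow as tf

# Load the converted tflite and list its embedded signatures.
interpreter = tf.lite.Interpreter(model_path="model_float32.tflite")
print(interpreter.get_signature_list())
# e.g. {'serving_default': {'inputs': ['input'], 'outputs': ['boxes', 'scores']}}

# The signature runner accepts/returns tensors keyed by the ONNX names.
runner = interpreter.get_signature_runner("serving_default")
```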
