-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmodel_converter.py
35 lines (28 loc) · 1.2 KB
/
model_converter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import tensorflow as tf
def model_converter(model_path, save_path, quantization=None):
    """Convert a TensorFlow SavedModel to a TFLite flatbuffer and save it.

    Args:
        model_path: Path to the SavedModel directory.
        save_path: Destination file path for the .tflite flatbuffer.
        quantization: None for a plain conversion, or a string containing
            'int8' (size-optimized quantization) or 'float16' (float16
            weight quantization).

    Raises:
        ValueError: If `quantization` is given but matches neither mode.
            (The original printed 'undefined quantization' and then crashed
            with NameError on the write below; fail fast instead, before
            `save_path` is opened/truncated.)
    """
    converter = tf.lite.TFLiteConverter.from_saved_model(model_path)  # path to the SavedModel directory
    converter.experimental_new_converter = True
    converter.allow_custom_ops = True
    if quantization is None:
        pass  # plain float32 conversion, no optimizations
    elif 'int8' in quantization:
        # NOTE(review): OPTIMIZE_FOR_SIZE is a deprecated alias of DEFAULT in
        # recent TF; kept for behavioral compatibility. True int8 quantization
        # would also need a representative_dataset — confirm intent.
        converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    elif 'float16' in quantization:
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.float16]
    else:
        raise ValueError('undefined quantization: {!r}'.format(quantization))
    # Single conversion point instead of one convert() per branch.
    tflite_model = converter.convert()
    # Save the model.
    with open(save_path, 'wb') as f:
        f.write(tflite_model)
if __name__ == '__main__':
    model_path = '/home/ning/extens/federated_contrastive/checkpoints/model1'
    # Unquantized export, kept for reference:
    # model_converter(model_path, 'model.tflite')
    # Export one int8-optimized and one float16-quantized variant.
    for save_path, mode in (
        ('model_int8.tflite', 'int8'),
        ('model_float16.tflite', 'float16'),
    ):
        model_converter(model_path, save_path, mode)