From 980f31c8f9a19645355d4d4cb420fadb4fa4afaa Mon Sep 17 00:00:00 2001
From: Boadi Samson
Date: Mon, 5 Jun 2023 21:53:23 +0200
Subject: [PATCH] Saving quantized model, catching Exceptions when they occur

---
 advanced_source/static_quantization_tutorial.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/advanced_source/static_quantization_tutorial.py b/advanced_source/static_quantization_tutorial.py
index 5e7c21ab46..b113957f4f 100644
--- a/advanced_source/static_quantization_tutorial.py
+++ b/advanced_source/static_quantization_tutorial.py
@@ -519,7 +519,13 @@ def prepare_data_loaders(data_path):
 torch.quantization.convert(per_channel_quantized_model, inplace=True)
 top1, top5 = evaluate(per_channel_quantized_model, criterion, data_loader_test, neval_batches=num_eval_batches)
 print('Evaluation accuracy on %d images, %2.2f'%(num_eval_batches * eval_batch_size, top1.avg))
-torch.jit.save(torch.jit.script(per_channel_quantized_model), saved_model_dir + scripted_quantized_model_file)
+# Saving quantized model, catching Exceptions when they occur
+try:
+    scripted_model = torch.jit.script(per_channel_quantized_model)
+    torch.jit.save(scripted_model, saved_model_dir + scripted_quantized_model_file)
+    print("Quantized model saved successfully.")
+except Exception as e:
+    print("Error occurred while saving the model:", str(e))
 
 ######################################################################
 # Changing just this quantization configuration method resulted in an increase
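
A minimal sketch of reloading the artifact written by the patched code,
assuming the tutorial's saved_model_dir and scripted_quantized_model_file
variables still name the file that torch.jit.save produced:

    import torch

    # Load the scripted, quantized model back from disk. TorchScript
    # archives are self-contained, so no model class definition is needed.
    loaded_model = torch.jit.load(saved_model_dir + scripted_quantized_model_file)
    # Switch to inference mode before evaluating the restored model.
    loaded_model.eval()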