diff --git a/tensorflow/lite/micro/examples/micro_speech/train/train_micro_speech_model.ipynb b/tensorflow/lite/micro/examples/micro_speech/train/train_micro_speech_model.ipynb
index 4408b1a1110..3a50b0f713b 100644
--- a/tensorflow/lite/micro/examples/micro_speech/train/train_micro_speech_model.ipynb
+++ b/tensorflow/lite/micro/examples/micro_speech/train/train_micro_speech_model.ipynb
@@ -162,7 +162,6 @@
       },
       "outputs": [],
       "source": [
-        "%tensorflow_version 1.x\n",
         "import tensorflow as tf"
       ]
     },
@@ -420,7 +419,7 @@
       },
       "outputs": [],
       "source": [
-        "with tf.Session() as sess:\n",
+        "with tf.compat.v1.Session() as sess:\n",
         "  float_converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)\n",
         "  float_tflite_model = float_converter.convert()\n",
         "  float_tflite_model_size = open(FLOAT_MODEL_TFLITE, \"wb\").write(float_tflite_model)\n",
@@ -428,8 +427,8 @@
         "\n",
         "  converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)\n",
         "  converter.optimizations = [tf.lite.Optimize.DEFAULT]\n",
-        "  converter.inference_input_type = tf.lite.constants.INT8\n",
-        "  converter.inference_output_type = tf.lite.constants.INT8\n",
+        "  converter.inference_input_type = tf.int8\n",
+        "  converter.inference_output_type = tf.int8\n",
         "  def representative_dataset_gen():\n",
         "    for i in range(100):\n",
         "      data, _ = audio_processor.get_data(1, i*1, model_settings,\n",
@@ -472,7 +471,7 @@
         "def run_tflite_inference(tflite_model_path, model_type=\"Float\"):\n",
         "  # Load test data\n",
         "  np.random.seed(0) # set random seed for reproducible test results.\n",
-        "  with tf.Session() as sess:\n",
+        "  with tf.compat.v1.Session() as sess:\n",
         "    test_data, test_labels = audio_processor.get_data(\n",
         "        -1, 0, model_settings, BACKGROUND_FREQUENCY, BACKGROUND_VOLUME_RANGE,\n",
         "        TIME_SHIFT_MS, 'testing', sess)\n",
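
For context, this patch migrates the notebook off TF1-only entry points: the `%tensorflow_version 1.x` Colab magic is dropped, `tf.Session` becomes `tf.compat.v1.Session` (the only place it exists in TF2), and the removed `tf.lite.constants.INT8` is replaced with the plain `tf.int8` dtype. Below is a minimal, self-contained sketch of the resulting TF2-style full-integer post-training quantization flow. The SavedModel path, output filename, and the random representative dataset are placeholders I introduce for illustration; the notebook itself calibrates with real samples from `audio_processor.get_data()`, and the 1960-element input shape is assumed to match the micro_speech spectrogram (49 x 40 features).

import numpy as np
import tensorflow as tf

SAVED_MODEL = "/content/saved_model"  # hypothetical path to the trained model

converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# tf.lite.constants was removed in TF2; plain tf dtypes are the new spelling.
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8

def representative_dataset_gen():
    # Stand-in for the notebook's audio_processor samples: yields float32
    # batches shaped like the model input so the converter can calibrate
    # quantization ranges over ~100 examples.
    for _ in range(100):
        yield [np.random.rand(1, 1960).astype(np.float32)]

converter.representative_dataset = representative_dataset_gen
tflite_model = converter.convert()

with open("model.tflite", "wb") as f:
    f.write(tflite_model)

With both `inference_input_type` and `inference_output_type` set to `tf.int8` and a representative dataset supplied, the converter emits a fully integer-quantized model, which is what the microcontroller-side micro_speech runtime expects.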