Revisions
tarun-ssharma revised this gist
Apr 22, 2022. 2 changed files with 34 additions and 44 deletions.
This revision removed the 44-line quoted write-up and moved the 34-line conversion script into its own file:

```
# Note: the import and dataset loading below are assumed additions; the gist file
# itself starts at the model definition and does not show where x_train/x_test come from.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)
model.evaluate(x_test, y_test, verbose=2)

# Representative dataset used to calibrate the full-integer quantization
# (random images here, rather than real calibration data).
image_shape = (28, 28)
def representative_dataset_gen():
    num_calibration_images = 10
    for i in range(num_calibration_images):
        image = tf.random.normal([1] + list(image_shape))
        yield [image]

converter = tf.lite.TFLiteConverter.from_keras_model(model)

# Full-integer quantization with uint8 input/output, so the Edge TPU compiler
# no longer rejects the model as "not quantized".
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.representative_dataset = representative_dataset_gen
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_quant_model = converter.convert()

open("coral8.tflite", "wb").write(tflite_quant_model)
```
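Before handing the result to the Edge TPU compiler, it can help to confirm the converted model really is fully quantized. Below is a minimal sketch (not part of the gist) that inspects the input and output tensor types of the `coral8.tflite` file written above:

```
import tensorflow as tf

# Load the quantized model written by the script above.
interpreter = tf.lite.Interpreter(model_path="coral8.tflite")
interpreter.allocate_tensors()

# A fully integer-quantized model with uint8 I/O reports dtype uint8 for both.
print(interpreter.get_input_details()[0]['dtype'])
print(interpreter.get_output_details()[0]['dtype'])
```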
tarun-ssharma revised this gist
Apr 22, 2022. 1 changed file with 43 additions and 9 deletions.
This revision replaced most of the original snippet with a quoted Edge TPU compiler error and the quantization script that fixes it:

> ```
> $ sudo edgetpu_compiler coral.tflite
>
> Edge TPU Compiler version 2.0.267685300
> Invalid model: coral.tflite
> Model not quantized
> ```
>
> P.S. I found a solution:
>
> ```
> model = tf.keras.models.Sequential([
>   tf.keras.layers.Flatten(input_shape=(28, 28)),
>   tf.keras.layers.Dense(128, activation='relu'),
>   tf.keras.layers.Dropout(0.2),
>   tf.keras.layers.Dense(10, activation='softmax')
> ])
>
> model.compile(optimizer='adam',
>               loss='sparse_categorical_crossentropy',
>               metrics=['accuracy'])
>
> model.fit(x_train, y_train, epochs=1)
> model.evaluate(x_test, y_test, verbose=2)
>
> image_shape = (28, 28)
> def representative_dataset_gen():
>     num_calibration_images = 10
>     for i in range(num_calibration_images):
>         image = tf.random.normal([1] + list(image_shape))
>         yield [image]
>
> converter = tf.lite.TFLiteConverter.from_keras_model(model)
>
> converter.optimizations = [tf.lite.Optimize.DEFAULT]
> converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
> converter.representative_dataset = representative_dataset_gen
> converter.inference_input_type = tf.uint8
> converter.inference_output_type = tf.uint8
> tflite_quant_model = converter.convert()
>
> open("coral8.tflite", "wb").write(tflite_quant_model)
> ```
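With the fully quantized `coral8.tflite` produced by the quoted script, the same compiler invocation (`sudo edgetpu_compiler coral8.tflite`) should no longer fail with "Model not quantized".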
dansitu created this gist
Mar 14, 2019.
```
# Load TensorFlow
import tensorflow as tf

# Set up the converter
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]

# Perform conversion and output file
tflite_quant_model = converter.convert()
output_dir.write_bytes(tflite_quant_model)
```
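The original snippet assumes `saved_model_dir` and `output_dir` already exist in scope. A minimal sketch of how they might be defined (the paths are illustrative, not from the gist; `output_dir` is, despite its name, a `pathlib.Path` pointing at the output file):

```
from pathlib import Path

# Hypothetical placeholders for names the snippet above leaves undefined.
saved_model_dir = "my_saved_model"        # directory containing a TensorFlow SavedModel
output_dir = Path("model_quant.tflite")   # output .tflite file path (write_bytes is a Path method)
```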