From ca3522a6e0e230fb4cb8e57336118f221c46492e Mon Sep 17 00:00:00 2001
From: Lucien
Date: Thu, 7 Nov 2019 18:47:05 +0900
Subject: [PATCH 1/4] update the path in README

---
 lmnet/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lmnet/README.md b/lmnet/README.md
index e3ecaebac..f63f18a29 100644
--- a/lmnet/README.md
+++ b/lmnet/README.md
@@ -133,7 +133,7 @@ Options:
 ```
 
 `--network` and `--dataset` option will override config on-the-fly.
-If you'd like to use your own custom config, please refer to `configs/example/classification.py` before your training. See also [Config specification](docs/specification/config.md).
+If you'd like to use your own custom config, please refer to `configs/example/classification.py` before your training. See also [Config specification](../docs/specification/config.md).
 To run training in Docker, read the [lmnet in Docker](docs/docker/README.md) page.
 
 ## Saving model and training results

From 00eb74953da9492f673586b55af53d41c7244384 Mon Sep 17 00:00:00 2001
From: Lucien
Date: Mon, 11 Nov 2019 12:14:53 +0900
Subject: [PATCH 2/4] the size of input image should be 32x32, see
 https://www.cs.toronto.edu/~kriz/cifar.html

---
 lmnet/configs/example/classification.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lmnet/configs/example/classification.py b/lmnet/configs/example/classification.py
index 5418f004e..4e250ad90 100644
--- a/lmnet/configs/example/classification.py
+++ b/lmnet/configs/example/classification.py
@@ -41,7 +41,7 @@
 NETWORK_CLASS = LmnetV0Quantize
 DATASET_CLASS = Cifar10
 
-IMAGE_SIZE = [28, 28]
+IMAGE_SIZE = [32, 32]
 BATCH_SIZE = 32
 DATA_FORMAT = "NHWC"
 TASK = Tasks.CLASSIFICATION

From b0b88da9b3c8bfb99bd71669e6d672dd065ea1c6 Mon Sep 17 00:00:00 2001
From: Lucien
Date: Mon, 11 Nov 2019 12:16:43 +0900
Subject: [PATCH 3/4] the explanation about max_to_keep=0 does not make sense;
 see https://github.com/tensorflow/tensorflow/issues/21044

---
 lmnet/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lmnet/README.md b/lmnet/README.md
index f63f18a29..5897e2f38 100644
--- a/lmnet/README.md
+++ b/lmnet/README.md
@@ -213,7 +213,7 @@ Currently, this feature is implemented for quantized classification of cifar10 a
 If you don't want to use this feature, set TRAIN_VALIDATION_SAVING_SIZE to zero.
 
 The KEEP_CHECKPOINT_MAX is equivalent to 'max_to_keep' of tensorflow train.Saver parameter which indicates the maximum number of recent checkpoint files to keep. As new files are created, older files are deleted.
-If None or 0, no checkpoints are deleted from the filesystem but only the last one is kept in the checkpoint file. Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
+If None or 0, only the last checkpoint is kept in the checkpoint file. Defaults to 5 (that is, the 5 most recent checkpoint files are kept).
 
 To apply this feature to another dataset, the dataset file should define another available subset called train_validation_saving, which is split from the original train dataset in the dataset file. Also a dataset parameter TRAIN_VALIDATION_SAVING_SIZE should be included in the config file.
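Note on PATCH 3: KEEP_CHECKPOINT_MAX is handed to tf.train.Saver as max_to_keep. A minimal TF 1.x sketch of that behaviour — the dummy variable, directory, and loop are illustrative only, not lmnet's actual training code:

```python
import os

import tensorflow as tf  # TF 1.x API, as used by lmnet

ckpt_dir = "/tmp/ckpt"
os.makedirs(ckpt_dir, exist_ok=True)

w = tf.Variable(0.0, name="w")  # dummy variable so the Saver has something to save
saver = tf.train.Saver(max_to_keep=5)  # KEEP_CHECKPOINT_MAX

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1, 8):
        # After step 7 only save.ckpt-3 ... save.ckpt-7 remain on disk;
        # older files are deleted as new ones are written.
        saver.save(sess, os.path.join(ckpt_dir, "save.ckpt"), global_step=step)
```

With max_to_keep=0 or None, the behaviour TensorFlow once documented ("no checkpoints are deleted") does not match what recent releases actually do — which is what the linked issue reports and why the README wording is changed above.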
From 0e18423c2788dbeac4addfdb0b1348de8f58a063 Mon Sep 17 00:00:00 2001
From: Lucien
Date: Tue, 12 Nov 2019 18:43:07 +0900
Subject: [PATCH 4/4] replace old config in example with the latest one

---
 lmnet/README.md                         |  4 +-
 lmnet/configs/example/classification.py | 75 ++++++++++++------
 2 files changed, 38 insertions(+), 41 deletions(-)

diff --git a/lmnet/README.md b/lmnet/README.md
index 5897e2f38..30eeb0e1c 100644
--- a/lmnet/README.md
+++ b/lmnet/README.md
@@ -225,7 +225,7 @@ Exporting a trained model to proto buffer files and meta config yaml.
 In the case with `images` option, create each layer output value npy files in `export/{restore_path}/{image_size}/{image_name}/**.npy` for debug.
 
 * Load config file from saved experiment dir.
-* Export config file to yaml. See also [Config specification](docs/specification/config.md).
+* Export config file to yaml. See also [Config specification](../docs/specification/config.md).
   * `config.yaml` can be used for training and evaluation in python. i.e. [classification.yaml](configs/example/classification.yaml) is exported from [classification.py](configs/example/classification.py)
   * `meta.yaml` include only few parameter for application such as demo. i.e. [classification_meta.yaml](configs/example/classification_meta.yaml) is exported from [classification.py](configs/example/classification.py)
 * Save the model protocol buffer files (tf) for DLK converter.
@@ -328,7 +328,7 @@ Save the predictions npy, json, images results to output dir.
 * json: `{output_dir}/json/{batch number}.json`
 * images: `{output_dir}/images/{some type}/{input image file name}`
 
-The output predictions Tensor(npy) and json format depends on task type. Plsease see [Output Data Specification](docs/specification/output_data.md).
+The output predictions Tensor(npy) and json format depend on task type. Please see [Output Data Specification](../docs/specification/output_data.md).
 ```
 python3 executor/predict.py -h
 ```
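The classification.py diff that follows swaps PerImageStandardization for DivideBy255 in PRE_PROCESSOR. Roughly, the two transforms compare as in this NumPy sketch — a simplified approximation; lmnet's actual implementations may differ in details such as the stddev floor:

```python
import numpy as np

def divide_by_255(image):
    # New pre-processing: scale raw uint8 pixels into [0, 1].
    return image.astype(np.float32) / 255.0

def per_image_standardization(image):
    # Old pre-processing: zero mean, unit variance per image, with a
    # floor on the stddev to avoid dividing by zero (the same guard
    # tf.image.per_image_standardization uses).
    image = image.astype(np.float32)
    stddev = max(image.std(), 1.0 / np.sqrt(image.size))
    return (image - image.mean()) / stddev
```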
diff --git a/lmnet/configs/example/classification.py b/lmnet/configs/example/classification.py
index 4e250ad90..7d199a538 100644
--- a/lmnet/configs/example/classification.py
+++ b/lmnet/configs/example/classification.py
@@ -9,7 +9,7 @@
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
@@ -17,71 +17,69 @@
 import tensorflow as tf
 from lmnet.common import Tasks
+from lmnet.networks.classification.lmnet_v1 import LmnetV1Quantize
 from lmnet.datasets.cifar10 import Cifar10
-from lmnet.networks.classification.lmnet_v0 import LmnetV0Quantize
 from lmnet.data_processor import Sequence
 from lmnet.pre_processor import (
     Resize,
-    PerImageStandardization,
+    DivideBy255,
 )
 from lmnet.data_augmentor import (
-    Brightness,
-    Color,
-    Contrast,
+    Crop,
     FlipLeftRight,
-    Hue,
+    Pad,
 )
 from lmnet.quantizations import (
     binary_mean_scaling_quantizer,
     linear_mid_tread_half_quantizer,
 )
 
-IS_DEBUG = True
+IS_DEBUG = False
 
-NETWORK_CLASS = LmnetV0Quantize
+NETWORK_CLASS = LmnetV1Quantize
 DATASET_CLASS = Cifar10
 
 IMAGE_SIZE = [32, 32]
-BATCH_SIZE = 32
+BATCH_SIZE = 100
 DATA_FORMAT = "NHWC"
 TASK = Tasks.CLASSIFICATION
 CLASSES = DATASET_CLASS.classes
 
+MAX_STEPS = 100000
+SAVE_CHECKPOINT_STEPS = 1000
 KEEP_CHECKPOINT_MAX = 5
-MAX_EPOCHS = 1  # MAX_STEPS = 1561
-SAVE_CHECKPOINT_STEPS = 100
-TEST_STEPS = 100
-SUMMARISE_STEPS = 10
-
-
+TEST_STEPS = 1000
+SUMMARISE_STEPS = 100
 
 # pretrain
 IS_PRETRAIN = False
-PRETRAIN_VARS = [
-    "conv1/kernel:",
-    "conv1/bias:",
-    "conv2/kernel:",
-    "conv2/bias:",
-    "conv3/kernel:",
-    "conv3/bias:",
-    "conv4/kernel:",
-    "conv4/bias:",
-    "conv5/kernel:",
-    "conv5/bias:",
-    "conv6/kernel:",
-    "conv6/bias:",
-]
-PRETRAIN_DIR = "saved/lmnet_0.01_caltech101/checkpoints"
-PRETRAIN_FILE = "save.ckpt-99001"
+PRETRAIN_VARS = []
+PRETRAIN_DIR = ""
+PRETRAIN_FILE = ""
+
+
+# for debug
+# MAX_STEPS = 10
+# BATCH_SIZE = 31
+# SAVE_CHECKPOINT_STEPS = 2
+# TEST_STEPS = 10
+# SUMMARISE_STEPS = 2
+# IS_DEBUG = True
 
 PRE_PROCESSOR = Sequence([
     Resize(size=IMAGE_SIZE),
-    PerImageStandardization()
+    DivideBy255()
 ])
 POST_PROCESSOR = None
 
 NETWORK = EasyDict()
-NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer
-NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}
+NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
+NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
+NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
+step_per_epoch = int(50000 / BATCH_SIZE)
+NETWORK.LEARNING_RATE_KWARGS = {
+    "values": [0.01, 0.001, 0.0001, 0.00001],
+    "boundaries": [step_per_epoch * 50, step_per_epoch * 100, step_per_epoch * 150],
+}
 NETWORK.IMAGE_SIZE = IMAGE_SIZE
 NETWORK.BATCH_SIZE = BATCH_SIZE
 NETWORK.DATA_FORMAT = DATA_FORMAT
@@ -100,9 +98,8 @@
 DATASET.DATA_FORMAT = DATA_FORMAT
 DATASET.PRE_PROCESSOR = PRE_PROCESSOR
 DATASET.AUGMENTOR = Sequence([
+    Pad(2),
+    Crop(size=IMAGE_SIZE),
     FlipLeftRight(),
-    Brightness((0.75, 1.25)),
-    Color((0.75, 1.25)),
-    Contrast((0.75, 1.25)),
-    Hue((-10, 10)),
 ])
+DATASET.TRAIN_VALIDATION_SAVING_SIZE = 5000
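Note on PATCH 4: the new optimizer block wires a step-wise learning-rate decay through tf.train.piecewise_constant. A standalone TF 1.x sketch of the schedule the config describes — the direct wiring below is illustrative; lmnet itself consumes LEARNING_RATE_FUNC and LEARNING_RATE_KWARGS from the config:

```python
import tensorflow as tf

BATCH_SIZE = 100
step_per_epoch = int(50000 / BATCH_SIZE)  # 50,000 CIFAR-10 train images -> 500 steps

global_step = tf.train.get_or_create_global_step()
# 0.01 until epoch 50, then 0.001 until 100, 0.0001 until 150, 0.00001 afterwards.
learning_rate = tf.train.piecewise_constant(
    global_step,
    boundaries=[step_per_epoch * 50, step_per_epoch * 100, step_per_epoch * 150],
    values=[0.01, 0.001, 0.0001, 0.00001],
)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
```

With BATCH_SIZE = 100 the boundaries land at steps 25000, 50000, and 75000, so all three decays fall inside the 100000-step MAX_STEPS budget.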